Dataset columns, with the per-column type and value/length statistics reported by the dataset viewer:

    Field               Type             Statistics
    repo                stringclasses    358 distinct values
    pull_number         int64            6 to 67.9k
    instance_id         stringlengths    12 to 49
    issue_numbers       sequencelengths  1 to 7
    base_commit         stringlengths    40 to 40
    patch               stringlengths    87 to 101M
    test_patch          stringlengths    72 to 22.3M
    problem_statement   stringlengths    3 to 256k
    hints_text          stringlengths    0 to 545k
    created_at          stringlengths    20 to 20
    PASS_TO_PASS        sequencelengths  0 to 0
    FAIL_TO_PASS        sequencelengths  0 to 0
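Read as a record type, the schema maps naturally onto a typed dict. A minimal Python sketch follows; the class name is hypothetical, the concrete types are inferred from the viewer's type labels rather than stated by the dataset, and the PASS_TO_PASS/FAIL_TO_PASS comments follow the usual SWE-bench-style convention those names suggest:

```python
from typing import TypedDict


class SweInstance(TypedDict):
    """One row of the dataset, typed per the schema above (hypothetical name)."""

    repo: str                  # source repository, e.g. "pypi/warehouse"
    pull_number: int
    instance_id: str           # "<org>__<repo>-<pull_number>"
    issue_numbers: list[str]   # ids of the linked issues (1 to 7 of them)
    base_commit: str           # 40-char git SHA the patches apply to
    patch: str                 # unified diff with the gold code change
    test_patch: str            # unified diff with the accompanying tests
    problem_statement: str     # issue text describing the task
    hints_text: str            # discussion text; may be empty
    created_at: str            # 20-char timestamp
    PASS_TO_PASS: list[str]    # tests expected to pass before and after
    FAIL_TO_PASS: list[str]    # tests expected to flip from fail to pass
```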
Sample row:

    repo:           pypi/warehouse
    pull_number:    11986
    instance_id:    pypi__warehouse-11986
    issue_numbers:  ["12061", "12061"]
    base_commit:    0de33b4e2f847564358f0ec718a4d7b9cf8e633d
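One way to pull this row out of the dataset is the Hugging Face datasets library. A minimal sketch, assuming the dataset is published on the Hub under a placeholder id (the real repository path is not given here):

```python
from datasets import load_dataset

# Placeholder hub id; substitute the dataset's actual path.
ds = load_dataset("org/swe-style-instances", split="train")

# Select the warehouse instance shown in the sample row.
row = next(r for r in ds if r["instance_id"] == "pypi__warehouse-11986")

print(row["repo"], row["base_commit"])   # pypi/warehouse 0de33b4e...
print(row["patch"].splitlines()[0])      # first line of the gold diff
```

The row's patch field, the gold diff for this pull request, follows in full: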
diff --git a/warehouse/api/billing.py b/warehouse/api/billing.py new file mode 100644 --- /dev/null +++ b/warehouse/api/billing.py @@ -0,0 +1,143 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import stripe + +from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent, HTTPNotFound +from pyramid.view import view_config + +from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService +from warehouse.subscriptions.models import StripeSubscriptionStatus + + +def handle_billing_webhook_event(request, event): + billing_service = request.find_service(IBillingService, context=None) + subscription_service = request.find_service(ISubscriptionService, context=None) + + match event["type"]: + # Occurs when a Checkout Session has been successfully completed. + case "checkout.session.completed": + checkout_session = event["data"]["object"] + # Get expanded checkout session object + checkout_session = billing_service.get_checkout_session( + checkout_session["id"], + # Provide mock_checkout_session used by MockStripeBillingService only. + mock_checkout_session=checkout_session, + ) + status = checkout_session["status"] + customer_id = checkout_session["customer"]["id"] + billing_email = checkout_session["customer"]["email"] + subscription_id = checkout_session["subscription"]["id"] + if status != "complete": + raise HTTPBadRequest(f"Invalid checkout session status '{status}'") + if not customer_id: + raise HTTPBadRequest("Invalid customer ID") + if not subscription_id: + raise HTTPBadRequest("Invalid subscription ID") + if id := subscription_service.find_subscriptionid(subscription_id): + # Set subscription status to active. + subscription_service.update_subscription_status( + id, StripeSubscriptionStatus.Active + ) + else: + # Get expanded subscription object + subscription_items = checkout_session["subscription"]["items"]["data"] + # Activate subscription for customer. + for subscription_item in subscription_items: + subscription_service.add_subscription( + customer_id, + subscription_id, + subscription_item["id"], + billing_email, + ) + # Occurs whenever a customer’s subscription ends. + case "customer.subscription.deleted": + subscription = event["data"]["object"] + status = subscription["status"] + customer_id = subscription["customer"] + subscription_id = subscription["id"] + if not status or not StripeSubscriptionStatus.has_value(status): + raise HTTPBadRequest(f"Invalid subscription status '{status}'") + if not customer_id: + raise HTTPBadRequest("Invalid customer ID") + if not subscription_id: + raise HTTPBadRequest("Invalid subscription ID") + if id := subscription_service.find_subscriptionid(subscription_id): + # Set subscription status to canceled. + subscription_service.update_subscription_status( + id, StripeSubscriptionStatus.Canceled + ) + else: + raise HTTPNotFound("Subscription not found") + # Occurs whenever a subscription changes e.g. status changes. 
+ case "customer.subscription.updated": + subscription = event["data"]["object"] + status = subscription["status"] + customer_id = subscription["customer"] + subscription_id = subscription["id"] + if not status or not StripeSubscriptionStatus.has_value(status): + raise HTTPBadRequest(f"Invalid subscription status '{status}'") + if not customer_id: + raise HTTPBadRequest("Invalid customer ID") + if not subscription_id: + raise HTTPBadRequest("Invalid subscription ID") + + if id := subscription_service.find_subscriptionid(subscription_id): + # Update subscription status. + subscription_service.update_subscription_status(id, status) + else: + raise HTTPNotFound("Subscription not found") + # Occurs whenever a customer is deleted. + case "customer.deleted": + customer = event["data"]["object"] + customer_id = customer["id"] + if not customer_id: + raise HTTPBadRequest("Invalid customer ID") + if subscription_service.get_subscriptions_by_customer(customer_id): + # Delete the customer and all associated subscription data + subscription_service.delete_customer(customer_id) + else: + raise HTTPNotFound("Customer subscription data not found") + # Occurs whenever a customer is updated. + case "customer.updated": + customer = event["data"]["object"] + customer_id = customer["id"] + billing_email = customer["email"] + if not customer_id: + raise HTTPBadRequest("Invalid customer ID") + if not billing_email: + raise HTTPBadRequest("Invalid billing email") + # Update customer email + subscription_service.update_customer_email(customer_id, billing_email) + + +@view_config( + route_name="api.billing.webhook", + require_csrf=False, + require_methods=["POST"], + uses_session=True, +) +def billing_webhook(request): + billing_service = request.find_service(IBillingService, context=None) + + try: + payload = request.body + sig_header = request.headers.get("Stripe-Signature") + event = billing_service.webhook_received(payload, sig_header) + except ValueError: + raise HTTPBadRequest("Invalid payload") + except stripe.error.SignatureVerificationError: + raise HTTPBadRequest("Invalid signature") + + handle_billing_webhook_event(request, event) + + return HTTPNoContent() diff --git a/warehouse/config.py b/warehouse/config.py --- a/warehouse/config.py +++ b/warehouse/config.py @@ -227,6 +227,7 @@ def configure(settings=None): coercer=int, default=21600, # 6 hours ) + maybe_set_compound(settings, "billing", "backend", "BILLING_BACKEND") maybe_set_compound(settings, "files", "backend", "FILES_BACKEND") maybe_set_compound(settings, "simple", "backend", "SIMPLE_BACKEND") maybe_set_compound(settings, "docs", "backend", "DOCS_BACKEND") @@ -552,6 +553,9 @@ def configure(settings=None): # Register our organization support. config.include(".organizations") + # Register our subscription support. + config.include(".subscriptions") + # Allow the packaging app to register any services it has. 
config.include(".packaging") diff --git a/warehouse/csp.py b/warehouse/csp.py --- a/warehouse/csp.py +++ b/warehouse/csp.py @@ -94,7 +94,7 @@ def includeme(config): ], "default-src": [NONE], "font-src": [SELF, "fonts.gstatic.com"], - "form-action": [SELF], + "form-action": [SELF, "https://checkout.stripe.com"], "frame-ancestors": [NONE], "frame-src": [NONE], "img-src": [ diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -13,6 +13,8 @@ import base64 import io +from urllib.parse import urljoin + import pyqrcode from paginate_sqlalchemy import SqlalchemyOrmPage as SQLAlchemyORMPage @@ -133,6 +135,7 @@ OrganizationInvitationStatus, OrganizationRole, OrganizationRoleType, + OrganizationType, Team, TeamProjectRole, TeamProjectRoleType, @@ -150,6 +153,8 @@ RoleInvitationStatus, ) from warehouse.rate_limiting import IRateLimiter +from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService +from warehouse.subscriptions.services import MockStripeBillingService from warehouse.utils.http import is_safe_url from warehouse.utils.organization import confirm_organization, confirm_team from warehouse.utils.paginate import paginate_url_factory @@ -1210,6 +1215,7 @@ def organization_members(request, organization): route_name="manage.organizations", renderer="manage/organizations.html", uses_session=True, + require_active_organization=False, # Allow list/create orgs without active org. require_csrf=True, require_methods=False, permission="manage:user", @@ -1270,15 +1276,17 @@ def default_response(self): @view_config(request_method="GET") def manage_organizations(self): + # Organizations must be enabled. if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound + raise HTTPNotFound() return self.default_response @view_config(request_method="POST", request_param=CreateOrganizationForm.__params__) def create_organization(self): + # Organizations must be enabled. 
if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound + raise HTTPNotFound() form = CreateOrganizationForm( self.request.POST, @@ -1347,6 +1355,14 @@ def create_organization(self): else: return {"create_organization_form": form} + if form.orgtype.data == OrganizationType.Company: + return HTTPSeeOther( + self.request.route_path( + "manage.organization.activate_subscription", + organization_name=organization.normalized_name, + ) + ) + return HTTPSeeOther(self.request.path) @@ -1355,6 +1371,7 @@ def create_organization(self): context=Organization, renderer="manage/organization/settings.html", uses_session=True, + require_active_organization=True, require_csrf=True, require_methods=False, permission="manage:organization", @@ -1369,6 +1386,7 @@ def __init__(self, organization, request): self.organization_service = request.find_service( IOrganizationService, context=None ) + self.billing_service = request.find_service(IBillingService, context=None) @property def active_projects(self): @@ -1394,16 +1412,10 @@ def default_response(self): @view_config(request_method="GET", permission="view:organization") def manage_organization(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - return self.default_response @view_config(request_method="POST", request_param=SaveOrganizationForm.__params__) def save_organization(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - form = SaveOrganizationForm( self.request.POST, organization_service=self.organization_service, @@ -1423,9 +1435,6 @@ def save_organization(self): + SaveOrganizationNameForm.__params__, ) def save_organization_name(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - confirm_organization( self.organization, self.request, @@ -1481,9 +1490,6 @@ def save_organization_name(self): @view_config(request_method="POST", request_param=["confirm_organization_name"]) def delete_organization(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - confirm_organization( self.organization, self.request, fail_route="manage.organization.settings" ) @@ -1507,6 +1513,11 @@ def delete_organization(self): # Get owners before deleting organization. owner_users = set(organization_owners(self.request, self.organization)) + # Cancel any subscriptions tied to this organization. + if self.organization.subscriptions: + for subscription in self.organization.subscriptions: + self.billing_service.cancel_subscription(subscription.subscription_id) + self.organization_service.delete_organization(self.organization.id) send_admin_organization_deleted_email( @@ -1523,11 +1534,129 @@ def delete_organization(self): return HTTPSeeOther(self.request.route_path("manage.organizations")) +@view_defaults( + context=Organization, + uses_session=True, + require_active_organization=False, # Allow reactivate billing for inactive org. 
+ require_csrf=True, + require_methods=False, + permission="manage:billing", + has_translations=True, + require_reauth=True, +) +class ManageOrganizationBillingViews: + def __init__(self, organization, request): + self.organization = organization + self.request = request + self.billing_service = request.find_service(IBillingService, context=None) + self.subscription_service = request.find_service( + ISubscriptionService, context=None + ) + self.organization_service = request.find_service( + IOrganizationService, context=None + ) + + @property + def customer_id(self): + if self.organization.customer is None: + customer = self.billing_service.create_customer( + name=( + self.request.registry.settings["site.name"] + + " Organization - " + + self.organization.name + ), + description=self.organization.description, + ) + stripe_customer = self.subscription_service.add_stripe_customer( + customer_id=customer["id"], + ) + self.organization_service.add_organization_stripe_customer( + organization_id=self.organization.id, + stripe_customer_id=stripe_customer.id, + ) + return customer["id"] + return self.organization.customer.customer_id + + @property + def price_id(self): + # Get or create default subscription price with subscription service. + default_subscription_price = ( + self.subscription_service.get_or_create_default_subscription_price() + ) + # Synchronize product and price with billing service. + self.billing_service.sync_product( + default_subscription_price.subscription_product + ) + self.billing_service.sync_price(default_subscription_price) + return default_subscription_price.price_id + + @property + def return_url(self): + return urljoin( + self.request.application_url, + self.request.GET.get( + "next", self.request.route_path("manage.organizations") + ), + ) + + def create_subscription(self): + # Create checkout session. + checkout_session = self.billing_service.create_checkout_session( + customer_id=self.customer_id, + price_ids=[self.price_id], + success_url=self.return_url, + cancel_url=self.return_url, + ) + create_subscription_url = checkout_session["url"] + if isinstance(self.billing_service, MockStripeBillingService): + # Use local mock of billing UI. + create_subscription_url = self.request.route_path( + "mock.billing.checkout-session", + organization_name=self.organization.normalized_name, + ) + return HTTPSeeOther(create_subscription_url) + + def manage_subscription(self): + portal_session = self.billing_service.create_portal_session( + customer_id=self.customer_id, + return_url=self.return_url, + ) + manage_subscription_url = portal_session["url"] + if isinstance(self.billing_service, MockStripeBillingService): + # Use local mock of billing UI. + manage_subscription_url = self.request.route_path( + "mock.billing.portal-session", + organization_name=self.organization.normalized_name, + ) + return HTTPSeeOther(manage_subscription_url) + + @view_config( + route_name="manage.organization.activate_subscription", + renderer="manage/organization/activate_subscription.html", + ) + def activate_subscription(self): + return {"organization": self.organization} + + @view_config(route_name="manage.organization.subscription") + def create_or_manage_subscription(self): + # Organizations must be enabled. + if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): + raise HTTPNotFound() + + if not self.organization.subscriptions: + # Create subscription if there are no existing subscription. 
+ return self.create_subscription() + else: + # Manage subscription if there is an existing subscription. + return self.manage_subscription() + + @view_defaults( route_name="manage.organization.teams", context=Organization, renderer="manage/organization/teams.html", uses_session=True, + require_active_organization=True, require_csrf=True, require_methods=False, permission="manage:organization", @@ -1556,14 +1685,14 @@ def default_response(self): @view_config(request_method="GET", permission="view:organization") def manage_teams(self): if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound + raise HTTPNotFound() return self.default_response @view_config(request_method="POST") def create_team(self): if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound + raise HTTPNotFound() # Get and validate form from default response. default_response = self.default_response @@ -1621,6 +1750,7 @@ def create_team(self): context=Organization, renderer="manage/organization/projects.html", uses_session=True, + require_active_organization=True, require_csrf=True, require_methods=False, permission="manage:organization", @@ -1676,16 +1806,10 @@ def default_response(self): @view_config(request_method="GET", permission="view:organization") def manage_organization_projects(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - return self.default_response @view_config(request_method="POST", permission="add:project") def add_organization_project(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - # Get and validate form from default response. default_response = self.default_response form = default_response["add_organization_project_form"] @@ -1787,6 +1911,7 @@ def add_organization_project(self): context=Organization, renderer="manage/organization/roles.html", uses_session=True, + require_active_organization=True, require_methods=False, permission="view:organization", has_translations=True, @@ -1795,9 +1920,6 @@ def add_organization_project(self): def manage_organization_roles( organization, request, _form_class=CreateOrganizationRoleForm ): - if request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - organization_service = request.find_service(IOrganizationService, context=None) user_service = request.find_service(IUserService, context=None) form = _form_class( @@ -1935,6 +2057,7 @@ def manage_organization_roles( route_name="manage.organization.revoke_invite", context=Organization, uses_session=True, + require_active_organization=True, require_methods=["POST"], permission="manage:organization", has_translations=True, @@ -2015,6 +2138,7 @@ def revoke_organization_invitation(organization, request): route_name="manage.organization.change_role", context=Organization, uses_session=True, + require_active_organization=True, require_methods=["POST"], permission="manage:organization", has_translations=True, @@ -2090,6 +2214,7 @@ def change_organization_role( route_name="manage.organization.delete_role", context=Organization, uses_session=True, + require_active_organization=True, require_methods=["POST"], permission="view:organization", has_translations=True, @@ -2169,6 +2294,7 @@ def delete_organization_role(organization, request): context=Team, renderer="manage/team/settings.html", uses_session=True, + require_active_organization=True, require_csrf=True, require_methods=False, permission="manage:team", @@ -2197,16 +2323,10 @@ def 
default_response(self): @view_config(request_method="GET", permission="view:team") def manage_team(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - return self.default_response @view_config(request_method="POST", request_param=SaveTeamForm.__params__) def save_team(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - form = SaveTeamForm( self.request.POST, organization_id=self.team.organization_id, @@ -2229,9 +2349,6 @@ def save_team(self): @view_config(request_method="POST", request_param=["confirm_team_name"]) def delete_team(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - # Confirm team name. confirm_team(self.team, self.request, fail_route="manage.team.settings") @@ -2287,6 +2404,7 @@ def delete_team(self): context=Team, renderer="manage/team/projects.html", uses_session=True, + require_active_organization=True, require_csrf=True, require_methods=False, permission="manage:team", @@ -2326,9 +2444,6 @@ def default_response(self): @view_config(request_method="GET", permission="view:team") def manage_team_projects(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - return self.default_response @@ -2337,6 +2452,7 @@ def manage_team_projects(self): context=Team, renderer="manage/team/roles.html", uses_session=True, + require_active_organization=True, require_csrf=True, require_methods=False, permission="manage:team", @@ -2373,16 +2489,10 @@ def default_response(self): @view_config(request_method="GET", permission="view:team") def manage_team_roles(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - return self.default_response @view_config(request_method="POST") def create_team_role(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - # Get and validate form from default response. default_response = self.default_response form = default_response["form"] @@ -2481,9 +2591,6 @@ def create_team_role(self): permission="view:team", ) def delete_team_role(self): - if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): - raise HTTPNotFound - # Get team role. role_id = self.request.POST["role_id"] role = self.organization_service.get_team_role(role_id) diff --git a/warehouse/migrations/versions/fe2e3d22b3fa_add_billing_and_subscriptions.py b/warehouse/migrations/versions/fe2e3d22b3fa_add_billing_and_subscriptions.py new file mode 100644 --- /dev/null +++ b/warehouse/migrations/versions/fe2e3d22b3fa_add_billing_and_subscriptions.py @@ -0,0 +1,416 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +add_billing_and_subscriptions + +Revision ID: fe2e3d22b3fa +Revises: 7eaad728b806 +Create Date: 2022-09-08 21:34:59.975082 +""" + +import sqlalchemy as sa + +from alembic import op +from sqlalchemy.dialects import postgresql + +revision = "fe2e3d22b3fa" +down_revision = "7eaad728b806" + +# Note: It is VERY important to ensure that a migration does not lock for a +# long period of time and to ensure that each individual migration does +# not break compatibility with the *previous* version of the code base. +# This is because the migrations will be ran automatically as part of the +# deployment process, but while the previous version of the code is still +# up and running. Thus backwards incompatible changes must be broken up +# over multiple migrations inside of multiple pull requests in order to +# phase them in over multiple deploys. +# +# By default, migrations cannot wait more than 4s on acquiring a lock +# and each individual statement cannot take more than 5s. This helps +# prevent situations where a slow migration takes the entire site down. +# +# If you need to increase this timeout for a migration, you can do so +# by adding: +# +# op.execute("SET statement_timeout = 5000") +# op.execute("SET lock_timeout = 4000") +# +# To whatever values are reasonable for this migration as part of your +# migration. + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "stripe_customers", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("customer_id", sa.Text(), nullable=False), + sa.Column("billing_email", sa.Text(), nullable=True), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("customer_id"), + ) + op.create_table( + "stripe_subscription_products", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("product_id", sa.Text(), nullable=True), + sa.Column("product_name", sa.Text(), nullable=False), + sa.Column("description", sa.Text(), nullable=False), + sa.Column( + "is_active", sa.Boolean(), server_default=sa.text("true"), nullable=False + ), + sa.Column("tax_code", sa.Text(), nullable=True), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "organization_projects", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("organization_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_id"], ["projects.id"], onupdate="CASCADE", ondelete="CASCADE" + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint( + "organization_id", + "project_id", + name="_organization_projects_organization_project_uc", + ), + ) + op.create_index( + "organization_projects_organization_id_idx", + "organization_projects", + ["organization_id"], + unique=False, + ) + op.create_index( + "organization_projects_project_id_idx", + "organization_projects", + ["project_id"], + unique=False, + ) + op.create_table( + "organization_stripe_customers", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("organization_id", postgresql.UUID(as_uuid=True), nullable=False), + 
sa.Column("stripe_customer_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["stripe_customer_id"], + ["stripe_customers.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint( + "organization_id", + "stripe_customer_id", + name="_organization_stripe_customers_organization_customer_uc", + ), + ) + op.create_index( + "organization_stripe_customers_organization_id_idx", + "organization_stripe_customers", + ["organization_id"], + unique=False, + ) + op.create_index( + "organization_stripe_customers_stripe_customer_id_idx", + "organization_stripe_customers", + ["stripe_customer_id"], + unique=False, + ) + op.create_table( + "stripe_subscription_prices", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("price_id", sa.Text(), nullable=True), + sa.Column("currency", sa.Text(), nullable=False), + sa.Column( + "subscription_product_id", postgresql.UUID(as_uuid=True), nullable=False + ), + sa.Column("unit_amount", sa.Integer(), nullable=False), + sa.Column( + "is_active", sa.Boolean(), server_default=sa.text("true"), nullable=False + ), + sa.Column( + "recurring", + sa.Enum( + "month", "year", "week", "day", name="stripesubscriptionpriceinterval" + ), + nullable=False, + ), + sa.Column("tax_behavior", sa.Text(), nullable=True), + sa.ForeignKeyConstraint( + ["subscription_product_id"], + ["stripe_subscription_products.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "stripe_subscriptions", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("stripe_customer_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("subscription_id", sa.Text(), nullable=False), + sa.Column( + "subscription_price_id", postgresql.UUID(as_uuid=True), nullable=False + ), + sa.Column( + "status", + sa.Enum( + "active", + "past_due", + "unpaid", + "canceled", + "incomplete", + "incomplete_expired", + "trialing", + name="stripesubscriptionstatus", + ), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["stripe_customer_id"], + ["stripe_customers.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["subscription_price_id"], + ["stripe_subscription_prices.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint( + "stripe_customer_id", + "subscription_id", + name="_stripe_subscription_customer_subscription_uc", + ), + ) + op.create_index( + "stripe_subscriptions_stripe_customer_id_idx", + "stripe_subscriptions", + ["stripe_customer_id"], + unique=False, + ) + op.create_index( + "stripe_subscriptions_subscription_id_idx", + "stripe_subscriptions", + ["subscription_id"], + unique=False, + ) + op.create_table( + "organization_stripe_subscriptions", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("organization_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("subscription_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["subscription_id"], + 
["stripe_subscriptions.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint( + "organization_id", + "subscription_id", + name="_organization_stripe_subscriptions_organization_subscription_uc", + ), + ) + op.create_index( + "organization_stripe_subscriptions_organization_id_idx", + "organization_stripe_subscriptions", + ["organization_id"], + unique=False, + ) + op.create_index( + "organization_stripe_subscriptions_subscription_id_idx", + "organization_stripe_subscriptions", + ["subscription_id"], + unique=False, + ) + op.create_table( + "stripe_subscription_items", + sa.Column( + "id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("subscription_item_id", sa.Text(), nullable=True), + sa.Column("subscription_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column( + "subscription_price_id", postgresql.UUID(as_uuid=True), nullable=False + ), + sa.Column("quantity", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["subscription_id"], + ["stripe_subscriptions.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["subscription_price_id"], + ["stripe_subscription_prices.id"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id"), + ) + op.drop_index( + "organization_project_organization_id_idx", table_name="organization_project" + ) + op.drop_index( + "organization_project_project_id_idx", table_name="organization_project" + ) + op.drop_table("organization_project") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "organization_project", + sa.Column( + "id", + postgresql.UUID(), + server_default=sa.text("gen_random_uuid()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "organization_id", postgresql.UUID(), autoincrement=False, nullable=False + ), + sa.Column("project_id", postgresql.UUID(), autoincrement=False, nullable=False), + sa.ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + name="organization_project_organization_id_fkey", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_id"], + ["projects.id"], + name="organization_project_project_id_fkey", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id", name="organization_project_pkey"), + sa.UniqueConstraint( + "organization_id", + "project_id", + name="_organization_project_organization_project_uc", + ), + ) + op.create_index( + "organization_project_project_id_idx", + "organization_project", + ["project_id"], + unique=False, + ) + op.create_index( + "organization_project_organization_id_idx", + "organization_project", + ["organization_id"], + unique=False, + ) + op.drop_table("stripe_subscription_items") + op.drop_index( + "organization_stripe_subscriptions_subscription_id_idx", + table_name="organization_stripe_subscriptions", + ) + op.drop_index( + "organization_stripe_subscriptions_organization_id_idx", + table_name="organization_stripe_subscriptions", + ) + op.drop_table("organization_stripe_subscriptions") + op.drop_index( + "stripe_subscriptions_subscription_id_idx", table_name="stripe_subscriptions" + ) + op.drop_index( + "stripe_subscriptions_stripe_customer_id_idx", table_name="stripe_subscriptions" + ) + op.drop_table("stripe_subscriptions") + op.drop_table("stripe_subscription_prices") + op.drop_index( + 
"organization_stripe_customers_stripe_customer_id_idx", + table_name="organization_stripe_customers", + ) + op.drop_index( + "organization_stripe_customers_organization_id_idx", + table_name="organization_stripe_customers", + ) + op.drop_table("organization_stripe_customers") + op.drop_index( + "organization_projects_project_id_idx", table_name="organization_projects" + ) + op.drop_index( + "organization_projects_organization_id_idx", table_name="organization_projects" + ) + op.drop_table("organization_projects") + op.drop_table("stripe_subscription_products") + op.drop_table("stripe_customers") + # ### end Alembic commands ### diff --git a/warehouse/mock/__init__.py b/warehouse/mock/__init__.py new file mode 100644 --- /dev/null +++ b/warehouse/mock/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/warehouse/mock/billing.py b/warehouse/mock/billing.py new file mode 100644 --- /dev/null +++ b/warehouse/mock/billing.py @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import random + +from string import ascii_letters, digits + +from pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther +from pyramid.view import view_config, view_defaults + +from warehouse.admin.flags import AdminFlagValue +from warehouse.api.billing import handle_billing_webhook_event +from warehouse.organizations.models import Organization +from warehouse.subscriptions.interfaces import IBillingService +from warehouse.subscriptions.services import MockStripeBillingService + + +@view_defaults( + context=Organization, + uses_session=True, + require_methods=False, + permission="manage:billing", + has_translations=True, + require_reauth=True, +) +class MockBillingViews: + def __init__(self, organization, request): + billing_service = request.find_service(IBillingService, context=None) + if request.flags.enabled( + AdminFlagValue.DISABLE_ORGANIZATIONS + ) or not isinstance(billing_service, MockStripeBillingService): + raise HTTPNotFound + self.organization = organization + self.request = request + + @view_config( + route_name="mock.billing.checkout-session", + renderer="mock/billing/checkout-session.html", + ) + def mock_checkout_session(self): + return {"organization": self.organization} + + @view_config( + route_name="mock.billing.portal-session", + renderer="mock/billing/portal-session.html", + ) + def mock_portal_session(self): + return {"organization": self.organization} + + @view_config(route_name="mock.billing.trigger-checkout-session-completed") + def mock_trigger_checkout_session_completed(self): + mock_event = { + "type": "checkout.session.completed", + "data": { + "object": { + "id": ( + "mockcs_" + + "".join(random.choices(digits + ascii_letters, k=58)) + ), + "customer": ( + self.organization.customer + and self.organization.customer.customer_id + ), + "customer_email": ( + self.organization.customer + and self.organization.customer.billing_email + ), + "status": "complete", + "subscription": ( + "mocksub_" + + "".join(random.choices(digits + ascii_letters, k=24)) + ), + }, + }, + } + handle_billing_webhook_event(self.request, mock_event) + + return HTTPSeeOther(self.request.route_path("manage.organizations")) diff --git a/warehouse/organizations/__init__.py b/warehouse/organizations/__init__.py --- a/warehouse/organizations/__init__.py +++ b/warehouse/organizations/__init__.py @@ -17,6 +17,7 @@ from warehouse.organizations.tasks import ( delete_declined_organizations, update_organization_invitation_status, + update_organziation_subscription_usage_record, ) @@ -27,5 +28,7 @@ def includeme(config): config.add_periodic_task( crontab(minute="*/5"), update_organization_invitation_status ) - config.add_periodic_task(crontab(minute=0, hour=0), delete_declined_organizations) + config.add_periodic_task( + crontab(minute=0, hour=0), update_organziation_subscription_usage_record + ) diff --git a/warehouse/organizations/interfaces.py b/warehouse/organizations/interfaces.py --- a/warehouse/organizations/interfaces.py +++ b/warehouse/organizations/interfaces.py @@ -158,6 +158,33 @@ def delete_organization_project(organization_id, project_id): Removes an association between the specified organization and project """ + def get_organization_subscription(organization_id, subscription_id): + """ + Return the organization subscription object that represents the given + organization subscription id or None + """ + + def add_organization_subscription(organization_id, subscription_id): + """ + Adds an association between the specified organization and subscription + """ + + def 
delete_organization_subscription(organization_id, subscription_id): + """ + Delete association between specified organization and subscription + """ + + def get_organization_stripe_customer(organization_id): + """ + Return the organization stripe customer object that is + associated to the given organization id or None + """ + + def add_organization_stripe_customer(organization_id, stripe_customer_id): + """ + Adds an association between the specified organization and customer + """ + def get_teams_by_organization(organization_id): """ Return a list of all team objects for the specified organization, diff --git a/warehouse/organizations/models.py b/warehouse/organizations/models.py --- a/warehouse/organizations/models.py +++ b/warehouse/organizations/models.py @@ -79,14 +79,14 @@ class OrganizationRole(db.Model): class OrganizationProject(db.Model): - __tablename__ = "organization_project" + __tablename__ = "organization_projects" __table_args__ = ( - Index("organization_project_organization_id_idx", "organization_id"), - Index("organization_project_project_id_idx", "project_id"), + Index("organization_projects_organization_id_idx", "organization_id"), + Index("organization_projects_project_id_idx", "project_id"), UniqueConstraint( "organization_id", "project_id", - name="_organization_project_organization_project_uc", + name="_organization_projects_organization_project_uc", ), ) @@ -105,6 +105,68 @@ class OrganizationProject(db.Model): project = orm.relationship("Project", lazy=False) +class OrganizationStripeSubscription(db.Model): + + __tablename__ = "organization_stripe_subscriptions" + __table_args__ = ( + Index( + "organization_stripe_subscriptions_organization_id_idx", "organization_id" + ), + Index( + "organization_stripe_subscriptions_subscription_id_idx", "subscription_id" + ), + UniqueConstraint( + "organization_id", + "subscription_id", + name="_organization_stripe_subscriptions_organization_subscription_uc", + ), + ) + + __repr__ = make_repr("organization_id", "subscription_id") + + organization_id = Column( + ForeignKey("organizations.id", onupdate="CASCADE", ondelete="CASCADE"), + nullable=False, + ) + subscription_id = Column( + ForeignKey("stripe_subscriptions.id", onupdate="CASCADE", ondelete="CASCADE"), + nullable=False, + ) + + organization = orm.relationship("Organization", lazy=False) + subscription = orm.relationship("StripeSubscription", lazy=False) + + +class OrganizationStripeCustomer(db.Model): + + __tablename__ = "organization_stripe_customers" + __table_args__ = ( + Index("organization_stripe_customers_organization_id_idx", "organization_id"), + Index( + "organization_stripe_customers_stripe_customer_id_idx", "stripe_customer_id" + ), + UniqueConstraint( + "organization_id", + "stripe_customer_id", + name="_organization_stripe_customers_organization_customer_uc", + ), + ) + + __repr__ = make_repr("organization_id", "stripe_customer_id") + + organization_id = Column( + ForeignKey("organizations.id", onupdate="CASCADE", ondelete="CASCADE"), + nullable=False, + ) + stripe_customer_id = Column( + ForeignKey("stripe_customers.id", onupdate="CASCADE", ondelete="CASCADE"), + nullable=False, + ) + + organization = orm.relationship("Organization", lazy=False) + customer = orm.relationship("StripeCustomer", lazy=False) + + class OrganizationType(str, enum.Enum): Community = "Community" @@ -196,6 +258,12 @@ class Organization(HasEvents, db.Model): projects = orm.relationship( "Project", secondary=OrganizationProject.__table__, back_populates="organization", viewonly=True 
# type: ignore # noqa ) + customer = orm.relationship( + "StripeCustomer", secondary=OrganizationStripeCustomer.__table__, back_populates="organization", uselist=False, viewonly=True # type: ignore # noqa + ) + subscriptions = orm.relationship( + "StripeSubscription", secondary=OrganizationStripeSubscription.__table__, back_populates="organization", viewonly=True # type: ignore # noqa + ) @property def owners(self): @@ -327,6 +395,14 @@ def __acl__(self): ) return acls + @property + def active_subscription(self): + for subscription in self.subscriptions: + if not subscription.is_restricted: + return subscription + else: + return None + class OrganizationNameCatalog(db.Model): diff --git a/warehouse/organizations/services.py b/warehouse/organizations/services.py --- a/warehouse/organizations/services.py +++ b/warehouse/organizations/services.py @@ -25,10 +25,13 @@ OrganizationNameCatalog, OrganizationProject, OrganizationRole, + OrganizationStripeCustomer, + OrganizationStripeSubscription, Team, TeamProjectRole, TeamRole, ) +from warehouse.subscriptions.models import StripeSubscription, StripeSubscriptionItem NAME_FIELD = "name" @@ -313,9 +316,27 @@ def delete_organization(self, organization_id): self.db.query(OrganizationProject).filter_by(organization=organization).delete() # Delete roles self.db.query(OrganizationRole).filter_by(organization=organization).delete() + # Delete billing data if it exists + if organization.subscriptions: + for subscription in organization.subscriptions: + # Delete subscription items + self.db.query(StripeSubscriptionItem).filter_by( + subscription=subscription + ).delete() + # Delete link to organization + self.db.query(OrganizationStripeSubscription).filter_by( + subscription=subscription + ).delete() + # Delete customer link to organization + self.db.query(OrganizationStripeCustomer).filter_by( + organization=organization + ).delete() + # Delete subscription object + self.db.query(StripeSubscription).filter( + StripeSubscription.id == subscription.id + ).delete() # Delete teams (and related data) self.delete_teams_by_organization(organization_id) - # TODO: Delete any stored card data from payment processor # Delete organization self.db.delete(organization) self.db.flush() @@ -377,7 +398,7 @@ def add_organization_project(self, organization_id, project_id): def delete_organization_project(self, organization_id, project_id): """ - Performs soft delete of association between specified organization and project + Delete association between specified organization and project """ organization_project = self.get_organization_project( organization_id, project_id @@ -386,6 +407,72 @@ def delete_organization_project(self, organization_id, project_id): self.db.delete(organization_project) self.db.flush() + def get_organization_subscription(self, organization_id, subscription_id): + """ + Return the organization subscription object that represents the given + organization subscription id or None + """ + return ( + self.db.query(OrganizationStripeSubscription) + .filter( + OrganizationStripeSubscription.organization_id == organization_id, + OrganizationStripeSubscription.subscription_id == subscription_id, + ) + .first() + ) + + def add_organization_subscription(self, organization_id, subscription_id): + """ + Adds an association between the specified organization and subscription + """ + organization_subscription = OrganizationStripeSubscription( + organization_id=organization_id, + subscription_id=subscription_id, + ) + + self.db.add(organization_subscription) + 
self.db.flush() + + return organization_subscription + + def delete_organization_subscription(self, organization_id, subscription_id): + """ + Delete association between specified organization and subscription + """ + organization_subscription = self.get_organization_subscription( + organization_id, subscription_id + ) + + self.db.delete(organization_subscription) + self.db.flush() + + def get_organization_stripe_customer(self, organization_id): + """ + Return the organization stripe customer object that is + associated to the given organization id or None + """ + return ( + self.db.query(OrganizationStripeCustomer) + .filter( + OrganizationStripeCustomer.organization_id == organization_id, + ) + .first() + ) + + def add_organization_stripe_customer(self, organization_id, stripe_customer_id): + """ + Adds an association between the specified organization and customer + """ + organization_stripe_customer = OrganizationStripeCustomer( + organization_id=organization_id, + stripe_customer_id=stripe_customer_id, + ) + + self.db.add(organization_stripe_customer) + self.db.flush() + + return organization_stripe_customer + def get_teams_by_organization(self, organization_id): """ Return a list of all team objects for the specified organization, diff --git a/warehouse/organizations/tasks.py b/warehouse/organizations/tasks.py --- a/warehouse/organizations/tasks.py +++ b/warehouse/organizations/tasks.py @@ -19,7 +19,9 @@ Organization, OrganizationInvitation, OrganizationInvitationStatus, + OrganizationStripeSubscription, ) +from warehouse.subscriptions.interfaces import IBillingService CLEANUP_AFTER = datetime.timedelta(days=30) @@ -63,3 +65,17 @@ def delete_declined_organizations(request): additional={"deleted_by": "CRON"}, ) organization_service.delete_organization(organization.id) + + [email protected](ignore_result=True, acks_late=True) +def update_organziation_subscription_usage_record(request): + # Get organizations with a subscription + organization_subscriptions = request.db.query(OrganizationStripeSubscription).all() + + # Call the Billing API to update the usage record of this subscription item + for organization_subscription in organization_subscriptions: + billing_service = request.find_service(IBillingService, context=None) + billing_service.create_or_update_usage_record( + organization_subscription.subscription.subscription_item.subscription_item_id, # type: ignore # noqa + len(organization_subscription.organization.users), + ) diff --git a/warehouse/predicates.py b/warehouse/predicates.py --- a/warehouse/predicates.py +++ b/warehouse/predicates.py @@ -16,6 +16,9 @@ from pyramid.exceptions import ConfigurationError from pyramid.util import is_same_domain +from warehouse.admin.flags import AdminFlagValue +from warehouse.organizations.models import Organization, OrganizationType, Team + class DomainPredicate: def __init__(self, val, config): @@ -55,6 +58,46 @@ def __call__(self, context, request): return all(sub(context, request) for sub in self.sub_predicates) +class ActiveOrganizationPredicate: + def __init__(self, val, config): + self.val = bool(val) + + def text(self): + return f"require_active_organization = {self.val}" + + phash = text + + def __call__(self, context: Organization | Team, request): + """Check organizations are enabled globally and this organization is active. + + 1. `AdminFlagValue.DISABLE_ORGANIZATIONS` flag is off. + 2. `Organization.is_active` is true. + 3. `Organization.active_subscription` exists if organization is a company. 
+ + """ + if self.val is False: + return True + + organization = ( + context if isinstance(context, Organization) else context.organization + ) + + return ( + # Organization accounts are enabled. + not request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS) + # Organization is active. + and organization.is_active + # Organization has active subscription if it is a Company. + and ( + organization.orgtype != OrganizationType.Company + or organization.active_subscription + ) + ) + + def includeme(config): config.add_route_predicate("domain", DomainPredicate) config.add_view_predicate("require_headers", HeadersPredicate) + config.add_view_predicate( + "require_active_organization", ActiveOrganizationPredicate + ) diff --git a/warehouse/routes.py b/warehouse/routes.py --- a/warehouse/routes.py +++ b/warehouse/routes.py @@ -180,6 +180,7 @@ def includeme(config): "/account/verify-project-role/", domain=warehouse, ) + # Management (views for logged-in users) config.add_route("manage.account", "/manage/account/", domain=warehouse) config.add_route( @@ -240,6 +241,20 @@ def includeme(config): traverse="/{organization_name}", domain=warehouse, ) + config.add_route( + "manage.organization.activate_subscription", + "/manage/organization/{organization_name}/subscription/activate/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ) + config.add_route( + "manage.organization.subscription", + "/manage/organization/{organization_name}/subscription/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ) config.add_route( "manage.organization.projects", "/manage/organization/{organization_name}/projects/", @@ -470,6 +485,7 @@ def includeme(config): ) # API URLs + config.add_route("api.billing.webhook", "/billing/webhook/", domain=warehouse) config.add_route("api.simple.index", "/simple/", domain=warehouse) config.add_route( "api.simple.detail", @@ -479,6 +495,29 @@ def includeme(config): domain=warehouse, ) + # Mock URLs + config.add_route( + "mock.billing.checkout-session", + "/mock/billing/{organization_name}/checkout/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ) + config.add_route( + "mock.billing.portal-session", + "/mock/billing/{organization_name}/portal/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ) + config.add_route( + "mock.billing.trigger-checkout-session-completed", + "/mock/billing/{organization_name}/checkout/completed/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ) + # Legacy URLs config.add_route( "legacy.api.json.project", diff --git a/warehouse/subscriptions/__init__.py b/warehouse/subscriptions/__init__.py new file mode 100644 --- /dev/null +++ b/warehouse/subscriptions/__init__.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService +from warehouse.subscriptions.services import subscription_factory + + +def includeme(config): + # Register our subscription service + config.register_service_factory(subscription_factory, ISubscriptionService) + # Register whatever payment service provider has been configured + billing_class = config.maybe_dotted(config.registry.settings["billing.backend"]) + config.register_service_factory(billing_class.create_service, IBillingService) diff --git a/warehouse/subscriptions/interfaces.py b/warehouse/subscriptions/interfaces.py new file mode 100644 --- /dev/null +++ b/warehouse/subscriptions/interfaces.py @@ -0,0 +1,292 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from zope.interface import Interface + + +class IGenericBillingService(Interface): + def create_service(context, request): + """ + Create appropriate billing service based on environment + """ + + def get_checkout_session(session_id): + """ + Fetch the Checkout Session to based on the session_id passed to the success page + """ + + def get_customer(subscription_id): + """ + Fetch the Customer resource attached to the Subscription + """ + + def create_customer(name, description): + """ + Create the Customer resource via Billing API with the given name and description + """ + + def create_checkout_session(customer_id, price_ids, success_url, cancel_url): + """ + # Create new Checkout Session for the order + # For full details see https://stripe.com/docs/api/checkout/sessions/create + """ + + def create_portal_session(customer_id, return_url): + """ + Return customer portal session to allow customer to managing their subscription + """ + + def webhook_received(payload, sig_header): + """ + Return parsed webhook event from Stripe + """ + + def create_or_update_product(name, description, tax_code, unit_label): + """ + Create product resource via Billing API, or update an active + product resource with the same name + """ + + def create_product(name, description, tax_code, unit_label): + """ + Create and return a product resource via Billing API + """ + + def retrieve_product(product_id): + """ + Get a product resource by id via Billing API + """ + + def update_product(product_id, name, description, tax_code, unit_label): + """ + Update a product resource via Billing API + only allowing update of those attributes we use + return the updated product + """ + + def list_all_products(limit=10): + """ + Get list of all price resources via Billing API + Limit can range between 1 and 100, default is 10 + """ + + def delete_product(product_id): + """ + Delete a product resource via Billing API + """ + + def search_products(query, limit=10): + """ + Search for product resources via Billing API + example: query="active:'true'" + """ + + def sync_product(subscription_product): + """ + Synchronize a product resource via Billing API with a + subscription 
product from the database. + """ + + def create_or_update_price(unit_amount, currency, product_id, tax_behavior): + """ + Create price resource via Billing API, or update an active price + resource with the same product and currency + """ + + def create_price(unit_amount, currency, product_id, tax_behavior): + """ + Create and return a price resource via Billing API + """ + + def retrieve_price(price_id): + """ + Get a price resource via Billing API + """ + + def update_price(price_id, **parameters): + """ + Update a price resource by id via Billing API + only allowing update of those attributes we use + return the updated price + """ + + def list_all_prices(limit=10): + """ + Get list of all price resources via Billing API + Limit can range between 1 and 100, default is 10 + """ + + def search_prices(query, limit=10): + """ + Search for price resources via Billing API + example: query="active:'true'" + """ + + def sync_price(subscription_price): + """ + Synchronize a price resource via Billing API with a + subscription price from the database. + """ + + def cancel_subscription(subscription_id): + """ + Cancels a customer’s subscription immediately. + The customer will not be charged again for the subscription. + """ + + def create_or_update_usage_record(subscription_item_id, organization_member_count): + """ + Creates a usage record via Billing API + for a specified subscription item and date with default=now, + and fills it with a quantity=number of members in the org. + """ + + +class IBillingService(IGenericBillingService): + pass + + +class ISubscriptionService(Interface): + def get_subscription(id): + """ + Get a subscription by id + """ + + def find_subscriptionid(subscription_id): + """ + Find the unique subscription identifier for the subscription, + by the payment service provider subscription id or None + """ + + def add_subscription( + customer_id, subscription_id, subscription_item_id, billing_email + ): + """ + Attempts to create a subscription object for the organization + with the specified customer ID and subscription ID + """ + + def update_subscription_status(id, status): + """ + Update the status of a subscription object by subscription.id + """ + + def delete_subscription(id): + """ + Delete a subscription by ID + """ + + def get_subscriptions_by_customer(customer_id): + """ + Get a list of subscriptions tied to the given customer ID + """ + + def get_stripe_customer(stripe_customer_id): + """ + Get a stripe customer by id + """ + + def find_stripe_customer_id(customer_id): + """ + Get the stripe customer UUID tied to the given customer ID + """ + + def delete_customer(customer_id): + """ + Deletes a customer and all associated subscription data + """ + + def add_stripe_customer(customer_id): + """ + Create a StripeCustomer object to associate to the Stripe customer ID + """ + + def update_customer_email(customer_id, billing_email): + """ + Update the customer's billing email + """ + + def get_subscription_product(subscription_product_id): + """ + Get a product by subscription product id + """ + + def get_subscription_products(): + """ + Get a list of all subscription products + """ + + def find_subscription_productid(search_term): + """ + Find the unique product identifier for the product name, + product id or None if nothing is found + """ + + def add_subscription_product(product_name, description, product_id, tax_code): + """ + Add a subscription product + """ + + def update_subscription_product(subscription_product_id, **changes): + """ + Accepts a subscription 
product object + and attempts an update with those attributes + """ + + def delete_subscription_product(subscription_product_id): + """ + Delete a subscription product + """ + + def get_or_create_default_subscription_price(): + """ + Get the default subscription price or initialize one if nothing is found + """ + + def get_subscription_price(subscription_price_id): + """ + Get a subscription price by id + """ + + def get_subscription_prices(): + """ + Get a list of all subscription prices + """ + + def find_subscription_priceid(search_term): + """ + Find the unique price identifier for the price id, product id, + subscription product id or None if nothing is found + """ + + def add_subscription_price( + price_id, + currency, + subscription_product_id, + unit_amount, + recurring, + tax_behavior, + ): + """ + Add a subscription price + """ + + def update_subscription_price(subscription_price_id, **changes): + """ + Accepts a subscription price object + and attempts an update with those attributes + """ + + def delete_subscription_price(subscription_price_id): + """ + Delete a subscription price + """ diff --git a/warehouse/subscriptions/models.py b/warehouse/subscriptions/models.py new file mode 100644 --- /dev/null +++ b/warehouse/subscriptions/models.py @@ -0,0 +1,220 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
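+
+# The models below mirror the Stripe billing objects used by the services
+# layer (customers, subscriptions, subscription items, products, and prices),
+# storing the provider-generated IDs alongside Warehouse's own primary keys.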
+ +import enum + +from sqlalchemy import ( + Boolean, + Column, + Enum, + ForeignKey, + Index, + Integer, + Text, + UniqueConstraint, + orm, + sql, +) +from sqlalchemy.dialects.postgresql import UUID + +from warehouse import db +from warehouse.i18n import localize as _ +from warehouse.organizations.models import ( + Organization, + OrganizationStripeCustomer, + OrganizationStripeSubscription, +) +from warehouse.utils.attrs import make_repr +from warehouse.utils.enum import StrLabelEnum + + +class StripeSubscriptionStatus(StrLabelEnum): + # Name = "value", _("Label") + Active = "active", _("Active") + PastDue = "past_due", _("Past Due") + Unpaid = "unpaid", _("Unpaid") + Canceled = "canceled", _("Canceled") + Incomplete = "incomplete", _("Incomplete") + IncompleteExpired = "incomplete_expired", _("Incomplete Expired") + Trialing = "trialing", _("Trialing") + + @classmethod + def has_value(cls, value): + return value in set(item.value for item in StripeSubscriptionStatus) + + +class StripeSubscriptionPriceInterval(str, enum.Enum): + + Month = "month" + Year = "year" + Week = "week" + Day = "day" + + +class StripeCustomer(db.Model): + + __tablename__ = "stripe_customers" + + __repr__ = make_repr("customer_id", "billing_email") + + customer_id = Column( + Text, nullable=False, unique=True + ) # generated by Payment Service Provider + billing_email = Column(Text) + + organization = orm.relationship( + Organization, + secondary=OrganizationStripeCustomer.__table__, # type: ignore + back_populates="customer", + uselist=False, + viewonly=True, + ) + subscriptions = orm.relationship("StripeSubscription", lazy=False) + + +class StripeSubscription(db.Model): + + __tablename__ = "stripe_subscriptions" + __table_args__ = ( + Index("stripe_subscriptions_stripe_customer_id_idx", "stripe_customer_id"), + Index("stripe_subscriptions_subscription_id_idx", "subscription_id"), + UniqueConstraint( + "stripe_customer_id", + "subscription_id", + name="_stripe_subscription_customer_subscription_uc", + ), + ) + + __repr__ = make_repr("subscription_id", "stripe_customer_id") + + stripe_customer_id = Column( + UUID(as_uuid=True), + ForeignKey("stripe_customers.id", onupdate="CASCADE", ondelete="CASCADE"), + nullable=False, + ) + subscription_id = Column( + Text, nullable=False + ) # generated by Payment Service Provider + subscription_price_id = Column( + UUID(as_uuid=True), + ForeignKey( + "stripe_subscription_prices.id", onupdate="CASCADE", ondelete="CASCADE" + ), + nullable=False, + ) + status = Column( + Enum(StripeSubscriptionStatus, values_callable=lambda x: [e.value for e in x]), + nullable=False, + ) + + subscription_price = orm.relationship("StripeSubscriptionPrice", lazy=False) + subscription_item = orm.relationship( + "StripeSubscriptionItem", + back_populates="subscription", + lazy=False, + uselist=False, + ) + organization = orm.relationship( + Organization, + secondary=OrganizationStripeSubscription.__table__, # type: ignore + back_populates="subscriptions", + uselist=False, + viewonly=True, + ) + customer = orm.relationship( + "StripeCustomer", + back_populates="subscriptions", + lazy=False, + uselist=False, + ) + + @property + def is_restricted(self): + return ( + self.status != StripeSubscriptionStatus.Active.value + and self.status != StripeSubscriptionStatus.Trialing.value + ) + + +class StripeSubscriptionProduct(db.Model): + + __tablename__ = "stripe_subscription_products" + + __repr__ = make_repr("product_name") + + product_id = Column(Text, nullable=True) # generated by Payment Service Provider 
+ product_name = Column(Text, nullable=False) + description = Column(Text, nullable=False) + is_active = Column(Boolean, nullable=False, server_default=sql.true()) + tax_code = Column(Text, nullable=True) # https://stripe.com/docs/tax/tax-categories + + +class StripeSubscriptionPrice(db.Model): + + __tablename__ = "stripe_subscription_prices" + + __repr__ = make_repr("price_id", "unit_amount", "recurring") + + price_id = Column(Text, nullable=True) # generated by Payment Service Provider + currency = Column(Text, nullable=False) # https://stripe.com/docs/currencies + subscription_product_id = Column( + UUID(as_uuid=True), + ForeignKey( + "stripe_subscription_products.id", onupdate="CASCADE", ondelete="CASCADE" + ), + nullable=False, + ) + unit_amount = Column(Integer, nullable=False) # positive integer in cents + is_active = Column(Boolean, nullable=False, server_default=sql.true()) + recurring = Column( + Enum( + StripeSubscriptionPriceInterval, + values_callable=lambda x: [e.value for e in x], + ), + nullable=False, + ) + tax_behavior = Column( + Text, nullable=True + ) # TODO: Enum? inclusive, exclusive, unspecified + + subscription_product = orm.relationship("StripeSubscriptionProduct", lazy=False) + + +class StripeSubscriptionItem(db.Model): + + __tablename__ = "stripe_subscription_items" + + __repr__ = make_repr( + "subscription_item_id", "subscription_id", "subscription_price_id", "quantity" + ) + + subscription_item_id = Column( + Text, nullable=True + ) # generated by Payment Service Provider + subscription_id = Column( + UUID(as_uuid=True), + ForeignKey("stripe_subscriptions.id", onupdate="CASCADE", ondelete="CASCADE"), + nullable=False, + ) + subscription_price_id = Column( + UUID(as_uuid=True), + ForeignKey( + "stripe_subscription_prices.id", onupdate="CASCADE", ondelete="CASCADE" + ), + nullable=False, + ) + quantity = Column(Integer, nullable=False) # positive integer or zero + + subscription = orm.relationship( + "StripeSubscription", lazy=False, back_populates="subscription_item" + ) + subscription_price = orm.relationship("StripeSubscriptionPrice", lazy=False) diff --git a/warehouse/subscriptions/services.py b/warehouse/subscriptions/services.py new file mode 100644 --- /dev/null +++ b/warehouse/subscriptions/services.py @@ -0,0 +1,738 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
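+
+# Two layers live in this module: GenericBillingService and its mock/live
+# subclasses wrap the Stripe API, while StripeSubscriptionService keeps the
+# matching customer, subscription, product, and price records in the
+# Warehouse database.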
+
+import random
+
+from string import ascii_letters, digits
+
+import stripe
+
+from sqlalchemy import or_
+from sqlalchemy.orm.exc import NoResultFound
+from zope.interface import implementer
+
+from warehouse.organizations.models import (
+    OrganizationStripeCustomer,
+    OrganizationStripeSubscription,
+)
+from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService
+from warehouse.subscriptions.models import (
+    StripeCustomer,
+    StripeSubscription,
+    StripeSubscriptionItem,
+    StripeSubscriptionPrice,
+    StripeSubscriptionPriceInterval,
+    StripeSubscriptionProduct,
+    StripeSubscriptionStatus,
+)
+
+
+class GenericBillingService:
+    def __init__(self, api, publishable_key, webhook_secret):
+        self.api = api
+        self.publishable_key = publishable_key
+        self.webhook_secret = webhook_secret
+
+    @classmethod
+    def create_service(cls, context, request):
+        """
+        Create appropriate billing service based on environment
+        """
+        raise NotImplementedError
+
+    def get_checkout_session(self, session_id, **kwargs):
+        """
+        Fetch the Checkout Session based on the session_id passed to the success page
+        """
+        checkout_session = self.api.checkout.Session.retrieve(
+            session_id,
+            expand=["customer", "line_items", "subscription"],
+        )
+        return checkout_session
+
+    def get_customer(self, subscription_id):
+        """
+        Fetch the Customer resource attached to the Subscription
+        """
+        subscription = self.api.Subscription.retrieve(
+            subscription_id,
+            expand=["customer"],
+        )
+        return subscription.customer
+
+    def create_customer(self, name, description):
+        """
+        Create the Customer resource via Billing API with the given name and description
+        """
+        return self.api.Customer.create(
+            name=name,
+            description=description,
+        )
+
+    def create_checkout_session(self, customer_id, price_ids, success_url, cancel_url):
+        """
+        Create a new Checkout Session for the order
+        For full details see https://stripe.com/docs/api/checkout/sessions/create
+        """
+        checkout_session = self.api.checkout.Session.create(
+            customer=customer_id,
+            success_url=success_url,
+            cancel_url=cancel_url,
+            mode="subscription",
+            line_items=[{"price": price_id} for price_id in price_ids],
+            # Uncomment `automatic_tax` to calculate tax automatically.
+            # Requires active tax settings on Stripe Dashboard.
+            # https://dashboard.stripe.com/settings/tax/activate
+            # automatic_tax={"enabled": True},
+        )
+        return checkout_session
+
+    def create_portal_session(self, customer_id, return_url):
+        """
+        Return a customer portal session to allow the customer to manage their subscription
+        """
+        portal_session = self.api.billing_portal.Session.create(
+            customer=customer_id,
+            return_url=return_url,
+        )
+        return portal_session
+
+    # See Stripe webhook documentation:
+    # https://stripe.com/docs/api/webhook_endpoints/create#create_webhook_endpoint-enabled_events
+    # https://stripe.com/docs/webhooks/quickstart
+    def webhook_received(self, payload, sig_header):
+        """
+        Return parsed webhook event from Stripe
+        """
+        return stripe.Webhook.construct_event(payload, sig_header, self.webhook_secret)
+
+    def create_or_update_product(self, name, description, tax_code, unit_label):
+        """
+        Create product resource via Billing API, or update an active
+        product resource with the same name
+        """
+        # Search for an active product with the given name.
+        # (a) Search exact or substring match for name as supported by Stripe API.
+        #     https://stripe.com/docs/search#query-fields-for-products
+        product_search = self.search_products(f'active:"true" name:"{name}"')
+        # (b) Make sure name is an exact match.
+        products = [
+            product for product in product_search["data"] if product["name"] == name
+        ]
+        if products:
+            product = max(products, key=lambda p: p["created"])
+            return self.update_product(
+                product["id"], name, description, tax_code, unit_label
+            )
+        else:
+            return self.create_product(name, description, tax_code, unit_label)
+
+    def create_product(self, name, description, tax_code, unit_label):
+        """
+        Create and return a product resource via Billing API
+        """
+        return self.api.Product.create(
+            name=name,
+            description=description,
+            tax_code=tax_code,
+            unit_label=unit_label,
+        )
+
+    def retrieve_product(self, product_id):
+        """
+        Get a product resource by id via Billing API
+        """
+        return self.api.Product.retrieve(product_id)
+
+    def update_product(self, product_id, name, description, tax_code, unit_label):
+        """
+        Update a product resource via Billing API,
+        only allowing update of those attributes we use,
+        and return the updated product
+        """
+        return self.api.Product.modify(
+            product_id,
+            name=name,
+            description=description,
+            tax_code=tax_code,
+            unit_label=unit_label,
+        )
+
+    def list_all_products(self, limit=10):
+        """
+        Get list of all product resources via Billing API
+        Limit can range between 1 and 100, default is 10
+        """
+        return self.api.Product.list(limit=limit)
+
+    def delete_product(self, product_id):
+        """
+        Delete a product resource via Billing API
+        """
+        return self.api.Product.delete(product_id)
+
+    def search_products(self, query, limit=10):
+        """
+        Search for product resources via Billing API
+        example: query="active:'true'"
+        """
+        return self.api.Product.search(query=query, limit=limit)
+
+    def sync_product(self, subscription_product):
+        """
+        Synchronize a product resource via Billing API with a
+        subscription product from the database.
+        """
+        product = self.create_or_update_product(
+            name=subscription_product.product_name,
+            description=subscription_product.description,
+            # See Stripe docs for tax codes. https://stripe.com/docs/tax/tax-categories
+            tax_code=subscription_product.tax_code,
+            unit_label="user",
+        )
+        subscription_product.product_id = product["id"]
+
+    def create_or_update_price(self, unit_amount, currency, product_id, tax_behavior):
+        """
+        Create price resource via Billing API, or update an active price
+        resource with the same product and currency
+        """
+        # Search for active prices that match all non-updatable fields.
+        # (a) Use query fields supported by Stripe API.
+        #     https://stripe.com/docs/search#query-fields-for-prices
+        price_search = self.search_prices(
+            f'active:"true" product:"{product_id}" currency:"{currency}"'
+        )
+        # (b) Filter for other fields not supported by Stripe API.
+        prices = [p for p in price_search["data"] if p["unit_amount"] == unit_amount]
+        # Create a new price if no match is found.
+        if not prices:
+            return self.create_price(
+                unit_amount,
+                currency,
+                product_id,
+                tax_behavior,
+            )
+        # Update most recent matching price and archive other matching prices.
+ # https://stripe.com/docs/api/prices/update + [*others, price] = sorted(prices, key=lambda p: p["created"]) + for other in others: + self.update_price(other["id"], active=False) + return self.update_price(price["id"], tax_behavior=tax_behavior) + + def create_price(self, unit_amount, currency, product_id, tax_behavior): + """ + Create and return a price resource via Billing API + """ + return self.api.Price.create( + unit_amount=unit_amount, + currency=currency, + recurring={ + # Hardcode 1 month. Different interval does not make sense with metered. + "interval": "month", + # Set "metered" and "max" to enable Stripe usage records. + # https://stripe.com/docs/products-prices/pricing-models#aggregate-metered-usage + "usage_type": "metered", + "aggregate_usage": "max", + }, + product=product_id, + tax_behavior=tax_behavior, + ) + + def retrieve_price(self, price_id): + """ + Get a price resource via Billing API + """ + return self.api.Price.retrieve(price_id) + + def update_price(self, price_id, **parameters): + """ + Update a price resource by id via Billing API + only allowing update of those attributes we use + return the updated price + """ + return self.api.Price.modify(price_id, **parameters) + + def list_all_prices(self, limit=10): + """ + Get list of all price resources via Billing API + Limit can range between 1 and 100, default is 10 + """ + return self.api.Price.list(limit=limit) + + def search_prices(self, query, limit=10): + """ + Search for price resources via Billing API + example: query="active:'true'" + """ + return self.api.Price.search(query=query, limit=limit) + + def sync_price(self, subscription_price): + """ + Synchronize a price resource via Billing API with a + subscription price from the database. + """ + price = self.create_or_update_price( + unit_amount=subscription_price.unit_amount, + currency=subscription_price.currency, + product_id=subscription_price.subscription_product.product_id, + tax_behavior=subscription_price.tax_behavior, + ) + subscription_price.price_id = price["id"] + + def cancel_subscription(self, subscription_id): + """ + Cancels a customer’s subscription immediately. + The customer will not be charged again for the subscription. + """ + return self.api.Subscription.delete(subscription_id) + + def create_or_update_usage_record( + self, subscription_item_id, organization_member_count + ): + """ + Creates a usage record via Billing API + for a specified subscription item and date with default=now, + and fills it with a quantity=number of members in the org. 
+ """ + return self.api.SubscriptionItem.create_usage_record( + subscription_item_id, + action="set", + quantity=organization_member_count, + ) + + +@implementer(IBillingService) +class MockStripeBillingService(GenericBillingService): + @classmethod + def create_service(cls, context, request): + # Override api_base to hit mock-stripe in development + stripe.api_base = request.registry.settings["billing.api_base"] + stripe.api_version = request.registry.settings["billing.api_version"] + stripe.api_key = "sk_test_123" + publishable_key = "pk_test_123" + webhook_secret = "whsec_123" + + return cls(stripe, publishable_key, webhook_secret) + + def create_customer(self, name, description): + # Mock Stripe doesn't return a customer_id so create a mock id by default + customer = super().create_customer(name, description) + customer["id"] = "mockcus_" + "".join( + random.choices(digits + ascii_letters, k=14) + ) + return customer + + def get_checkout_session(self, session_id, mock_checkout_session={}, **kwargs): + # Mock Stripe doesn't persist data so allow passing in a mock_checkout_session. + checkout_session = super().get_checkout_session(session_id) + # Fill in customer ID, status, and subscription ID from mock_checkout_session. + checkout_session["customer"]["id"] = mock_checkout_session.get( + "customer", checkout_session["customer"]["id"] + ) + checkout_session["customer"]["email"] = mock_checkout_session.get( + "customer_email", checkout_session["customer"]["email"] + ) + checkout_session["status"] = mock_checkout_session.get( + "status", checkout_session["status"] + ) + checkout_session["subscription"]["id"] = mock_checkout_session.get( + "subscription", checkout_session["subscription"]["id"] + ) + return checkout_session + + +@implementer(IBillingService) +class StripeBillingService(GenericBillingService): + @classmethod + def create_service(cls, context, request): + stripe.api_version = request.registry.settings["billing.api_version"] + stripe.api_key = request.registry.settings["billing.secret_key"] + publishable_key = request.registry.settings["billing.publishable_key"] + webhook_secret = request.registry.settings["billing.webhook_key"] + + return cls(stripe, publishable_key, webhook_secret) + + +@implementer(ISubscriptionService) +class StripeSubscriptionService: + def __init__(self, db_session): + self.db = db_session + + def get_subscription(self, id): + """ + Get a subscription by id + """ + return self.db.query(StripeSubscription).get(id) + + def find_subscriptionid(self, subscription_id): + """ + Find the unique subscription identifier for the subscription, + by the payment service provider subscription id or None + """ + try: + (id,) = ( + self.db.query(StripeSubscription.id) + .filter( + StripeSubscription.subscription_id == subscription_id, + ) + .one() + ) + except NoResultFound: + return + + return id + + def add_subscription( + self, customer_id, subscription_id, subscription_item_id, billing_email + ): + """ + Attempts to create a subscription object for the organization + with the specified customer ID and subscription ID + """ + # Get default subscription price. + subscription_price = self.get_or_create_default_subscription_price() + + # Get the stripe customer. + stripe_customer = self.get_stripe_customer( + self.find_stripe_customer_id(customer_id) + ) + + # Set the billing email + stripe_customer.billing_email = billing_email + + # Add new subscription. 
+ subscription = StripeSubscription( + stripe_customer_id=stripe_customer.id, + subscription_id=subscription_id, + subscription_price_id=subscription_price.id, + status=StripeSubscriptionStatus.Active, # default active subscription + ) + + # Get the organization stripe customer. + organization_stripe_customer = ( + self.db.query(OrganizationStripeCustomer) + .filter(OrganizationStripeCustomer.stripe_customer_id == stripe_customer.id) + .one() + ) + + # Link to organization. + organization_subscription = OrganizationStripeSubscription( + organization=organization_stripe_customer.organization, + subscription=subscription, + ) + + self.db.add(subscription) + self.db.add(organization_subscription) + self.db.flush() # get back the subscription id + + # Create new subscription item. + subscription_item = StripeSubscriptionItem( + subscription_item_id=subscription_item_id, + subscription_id=subscription.id, + subscription_price_id=subscription_price.id, + quantity=len(organization_stripe_customer.organization.users), + ) + + self.db.add(subscription_item) + self.db.flush() + + return subscription + + def update_subscription_status(self, id, status): + """ + Update the status of a subscription object by subscription.id + """ + self.db.query(StripeSubscription).filter( + StripeSubscription.id == id, + ).update({StripeSubscription.status: status}) + + def delete_subscription(self, id): + """ + Delete a subscription by ID + """ + subscription = self.get_subscription(id) + + # Delete link to organization + self.db.query(OrganizationStripeSubscription).filter_by( + subscription=subscription + ).delete() + + # Delete subscription items + self.db.query(StripeSubscriptionItem).filter_by( + subscription=subscription + ).delete() + + self.db.delete(subscription) + self.db.flush() + + def get_subscriptions_by_customer(self, customer_id): + """ + Get a list of subscriptions tied to the given customer ID + """ + stripe_customer_id = self.find_stripe_customer_id(customer_id) + return ( + self.db.query(StripeSubscription) + .filter(StripeSubscription.stripe_customer_id == stripe_customer_id) + .all() + ) + + def get_stripe_customer(self, stripe_customer_id): + """ + Get a stripe customer by id + """ + return self.db.query(StripeCustomer).get(stripe_customer_id) + + def find_stripe_customer_id(self, customer_id): + """ + Get the stripe customer UUID tied to the given customer ID + """ + try: + (id,) = ( + self.db.query(StripeCustomer.id) + .filter( + StripeCustomer.customer_id == customer_id, + ) + .one() + ) + except NoResultFound: + return + + return id + + def delete_customer(self, customer_id): + """ + Deletes a customer and all associated subscription data + """ + subscriptions = self.get_subscriptions_by_customer(customer_id) + + for subscription in subscriptions: + self.delete_subscription(subscription.id) + + stripe_customer_id = self.find_stripe_customer_id(customer_id) + # Delete OrganizationStripeCustomer association + self.db.query(OrganizationStripeCustomer).filter( + OrganizationStripeCustomer.stripe_customer_id == stripe_customer_id + ).delete() + + # Delete StripeCustomer object + self.db.query(StripeCustomer).filter( + StripeCustomer.id == stripe_customer_id + ).delete() + + def add_stripe_customer(self, customer_id): + """ + Create a StripeCustomer object to associate to the Stripe customer ID + """ + stripe_customer = StripeCustomer( + customer_id=customer_id, + ) + + self.db.add(stripe_customer) + self.db.flush() + + return stripe_customer + + def update_customer_email(self, customer_id, 
billing_email): + """ + Update the customer's billing email + """ + stripe_customer_id = self.find_stripe_customer_id(customer_id) + self.db.query(StripeCustomer).filter( + StripeCustomer.id == stripe_customer_id, + ).update({StripeCustomer.billing_email: billing_email}) + + def get_subscription_product(self, subscription_product_id): + """ + Get a product by subscription product id + """ + return self.db.query(StripeSubscriptionProduct).get(subscription_product_id) + + def get_subscription_products(self): + """ + Get a list of all products + """ + return ( + self.db.query(StripeSubscriptionProduct) + .order_by(StripeSubscriptionProduct.product_name) + .all() + ) + + def find_subscription_productid(self, search_term): + """ + Find the unique product identifier for the product name, + product id or None if nothing is found + """ + try: + (subscription_product_id,) = ( + self.db.query(StripeSubscriptionProduct.id) + .filter( + or_( + StripeSubscriptionProduct.product_name == search_term, + StripeSubscriptionProduct.product_id == search_term, + ) + ) + .one() + ) + except NoResultFound: + return + + return subscription_product_id + + def add_subscription_product(self, product_name, description, product_id, tax_code): + """ + Add a subscription product + """ + subscription_product = StripeSubscriptionProduct( + product_name=product_name, + description=description, + product_id=product_id, + tax_code=tax_code, + ) + + self.db.add(subscription_product) + self.db.flush() + + return subscription_product + + def update_subscription_product(self, subscription_product_id, **changes): + """ + Accepts a subscription product object + and attempts an update with those attributes + """ + subscription_product = self.get_subscription_product(subscription_product_id) + for attr, value in changes.items(): + setattr(subscription_product, attr, value) + + return subscription_product + + def delete_subscription_product(self, subscription_product_id): + """ + Delete a subscription product + """ + subscription_product = self.get_subscription_product(subscription_product_id) + + self.db.delete(subscription_product) + self.db.flush() + + def get_or_create_default_subscription_price(self): + """ + Get the default subscription price or initialize one if nothing is found + """ + try: + subscription_price = ( + self.db.query(StripeSubscriptionPrice) + .filter(StripeSubscriptionPrice.is_active) + .one() + ) + except NoResultFound: + subscription_product = self.add_subscription_product( + product_name="PyPI", + description="Organization account for companies", + product_id=None, + # See Stripe docs for tax codes. 
https://stripe.com/docs/tax/tax-categories # noqa: E501 + tax_code="txcd_10103001", # Software as a service (SaaS) - business use + ) + subscription_price = self.add_subscription_price( + price_id=None, + currency="usd", + subscription_product_id=subscription_product.id, + unit_amount=700, + recurring=StripeSubscriptionPriceInterval.Month, + tax_behavior="inclusive", + ) + + return subscription_price + + def get_subscription_price(self, subscription_price_id): + """ + Get a subscription price by id + """ + return self.db.query(StripeSubscriptionPrice).get(subscription_price_id) + + def get_subscription_prices(self): + """ + Get a list of all subscription prices + """ + return ( + self.db.query(StripeSubscriptionPrice) + .order_by(StripeSubscriptionPrice.id) + .all() + ) + + def find_subscription_priceid(self, search_term): + """ + Find the unique price identifier for the price id, + subscription product id or None if nothing is found + """ + try: + (subscription_price_id,) = ( + self.db.query(StripeSubscriptionPrice.id) + .filter( + StripeSubscriptionPrice.price_id == search_term, + ) + .one() + ) + except NoResultFound: + return + + return subscription_price_id + + def add_subscription_price( + self, + price_id, + currency, + subscription_product_id, + unit_amount, + recurring, + tax_behavior, + ): + """ + Add a subscription price + """ + subscription_price = StripeSubscriptionPrice( + price_id=price_id, + currency=currency, + subscription_product_id=subscription_product_id, + unit_amount=unit_amount, + recurring=recurring, + tax_behavior=tax_behavior, + ) + + self.db.add(subscription_price) + self.db.flush() + + return subscription_price + + def update_subscription_price(self, subscription_price_id, **changes): + """ + Accepts a subscription price object + and attempts an update with those attributes + """ + subscription_price = self.get_subscription_price(subscription_price_id) + for attr, value in changes.items(): + setattr(subscription_price, attr, value) + + return subscription_price + + def delete_subscription_price(self, subscription_price_id): + """ + Delete a subscription price + """ + subscription_price = self.get_subscription_price(subscription_price_id) + + self.db.delete(subscription_price) + self.db.flush() + + +def subscription_factory(context, request): + return StripeSubscriptionService(request.db) diff --git a/warehouse/utils/enum.py b/warehouse/utils/enum.py new file mode 100644 --- /dev/null +++ b/warehouse/utils/enum.py @@ -0,0 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum + + +class StrLabelEnum(str, enum.Enum): + """Base class for Enum with string value and display label.""" + + label: str + + # Name = "value", _("Label") + def __new__(cls, value: str, label: str): + obj = str.__new__(cls, value) + obj._value_ = value + obj.label = label + return obj
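+
+
+# Illustration (the behavior follows from str.__new__ above): a subclass such
+# as StripeSubscriptionStatus in warehouse/subscriptions/models.py gets
+# members that are also plain strings, plus a display label:
+#
+#     StripeSubscriptionStatus.Active == "active"  # True
+#     StripeSubscriptionStatus.Active.label        # "Active" (a localized string)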
diff --git a/tests/common/db/organizations.py b/tests/common/db/organizations.py --- a/tests/common/db/organizations.py +++ b/tests/common/db/organizations.py @@ -22,6 +22,8 @@ OrganizationProject, OrganizationRole, OrganizationRoleType, + OrganizationStripeCustomer, + OrganizationStripeSubscription, Team, TeamProjectRole, TeamProjectRoleType, @@ -32,6 +34,7 @@ from .accounts import UserFactory from .base import WarehouseFactory from .packaging import ProjectFactory +from .subscriptions import StripeCustomerFactory, StripeSubscriptionFactory fake = faker.Faker() @@ -108,6 +111,24 @@ class Meta: project = factory.SubFactory(ProjectFactory) +class OrganizationStripeSubscriptionFactory(WarehouseFactory): + class Meta: + model = OrganizationStripeSubscription + + id = factory.Faker("uuid4", cast_to=None) + organization = factory.SubFactory(OrganizationFactory) + subscription = factory.SubFactory(StripeSubscriptionFactory) + + +class OrganizationStripeCustomerFactory(WarehouseFactory): + class Meta: + model = OrganizationStripeCustomer + + id = factory.Faker("uuid4", cast_to=None) + organization = factory.SubFactory(OrganizationFactory) + customer = factory.SubFactory(StripeCustomerFactory) + + class TeamFactory(WarehouseFactory): class Meta: model = Team diff --git a/tests/common/db/subscriptions.py b/tests/common/db/subscriptions.py new file mode 100644 --- /dev/null +++ b/tests/common/db/subscriptions.py @@ -0,0 +1,80 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
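+
+# factory_boy factories for the Stripe billing models; identifiers that the
+# payment provider would normally generate (customer_id, price_id, and so on)
+# are filled in with fakes or fixed test values.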
+ +import factory + +from warehouse.subscriptions.models import ( + StripeCustomer, + StripeSubscription, + StripeSubscriptionItem, + StripeSubscriptionPrice, + StripeSubscriptionProduct, + StripeSubscriptionStatus, +) + +from .base import WarehouseFactory + + +class StripeCustomerFactory(WarehouseFactory): + class Meta: + model = StripeCustomer + + id = factory.Faker("uuid4", cast_to=None) + customer_id = factory.Faker("uuid4") + billing_email = factory.Faker("safe_email") + + +class StripeSubscriptionProductFactory(WarehouseFactory): + class Meta: + model = StripeSubscriptionProduct + + id = factory.Faker("uuid4", cast_to=None) + product_id = "prod_123" + product_name = factory.Faker("pystr", max_chars=12) + description = factory.Faker("sentence") + + +class StripeSubscriptionPriceFactory(WarehouseFactory): + class Meta: + model = StripeSubscriptionPrice + + id = factory.Faker("uuid4", cast_to=None) + price_id = "price_123" + currency = "usd" + unit_amount = 2500 + recurring = "month" + + subscription_product = factory.SubFactory(StripeSubscriptionProductFactory) + + +class StripeSubscriptionFactory(WarehouseFactory): + class Meta: + model = StripeSubscription + + id = factory.Faker("uuid4", cast_to=None) + subscription_id = factory.Faker("uuid4") + status = StripeSubscriptionStatus.Active + + subscription_price = factory.SubFactory(StripeSubscriptionPriceFactory) + customer = factory.SubFactory(StripeCustomerFactory) + + +class StripeSubscriptionItemFactory(WarehouseFactory): + class Meta: + model = StripeSubscriptionItem + + id = factory.Faker("uuid4", cast_to=None) + subscription_item_id = "si_123" + quantity = 1 + + subscription = factory.SubFactory(StripeSubscriptionFactory) + subscription_price = factory.SubFactory(StripeSubscriptionPriceFactory) diff --git a/tests/conftest.py b/tests/conftest.py --- a/tests/conftest.py +++ b/tests/conftest.py @@ -24,6 +24,7 @@ import pretend import pyramid.testing import pytest +import stripe import webtest as _webtest from jinja2 import Environment, FileSystemLoader @@ -48,6 +49,8 @@ from warehouse.metrics import IMetricsService from warehouse.organizations import services as organization_services from warehouse.organizations.interfaces import IOrganizationService +from warehouse.subscriptions import services as subscription_services +from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService from .common.db import Session from .common.db.accounts import EmailFactory, UserFactory @@ -123,14 +126,22 @@ def find_service(self, iface=None, context=None, name=""): @pytest.fixture def pyramid_services( - email_service, metrics, organization_service, token_service, user_service + billing_service, + email_service, + metrics, + organization_service, + subscription_service, + token_service, + user_service, ): services = _Services() # Register our global services. 
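+    # billing_service and subscription_service are the test doubles supplied
+    # by the fixtures defined later in this module.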
+ services.register_service(billing_service, IBillingService, None, name="") services.register_service(email_service, IEmailSender, None, name="") services.register_service(metrics, IMetricsService, None, name="") services.register_service(organization_service, IOrganizationService, None, name="") + services.register_service(subscription_service, ISubscriptionService, None, name="") services.register_service(token_service, ITokenService, None, name="password") services.register_service(token_service, ITokenService, None, name="email") services.register_service(user_service, IUserService, None, name="") @@ -242,6 +253,7 @@ def app_config(database): "simple.backend": "warehouse.packaging.services.LocalSimpleStorage", "docs.backend": "warehouse.packaging.services.LocalDocsStorage", "sponsorlogos.backend": "warehouse.admin.services.LocalSponsorLogoStorage", + "billing.backend": "warehouse.subscriptions.services.MockStripeBillingService", "mail.backend": "warehouse.email.services.SMTPEmailSender", "malware_check.backend": ( "warehouse.malware.services.PrinterMalwareCheckService" @@ -308,6 +320,23 @@ def organization_service(db_session, remote_addr): ) [email protected] +def billing_service(app_config): + stripe.api_base = app_config.registry.settings["billing.api_base"] + stripe.api_version = app_config.registry.settings["billing.api_version"] + stripe.api_key = "sk_test_123" + return subscription_services.MockStripeBillingService( + api=stripe, + publishable_key="pk_test_123", + webhook_secret="whsec_123", + ) + + [email protected] +def subscription_service(db_session): + return subscription_services.StripeSubscriptionService(db_session) + + @pytest.fixture def token_service(app_config): return account_services.TokenService(secret="secret", salt="salt", max_age=21600) diff --git a/tests/functional/manage/test_views.py b/tests/functional/manage/test_views.py --- a/tests/functional/manage/test_views.py +++ b/tests/functional/manage/test_views.py @@ -15,7 +15,6 @@ from webob.multidict import MultiDict from warehouse.accounts.interfaces import IPasswordBreachedService, IUserService -from warehouse.admin.flags import AdminFlagValue from warehouse.manage import views from warehouse.organizations.interfaces import IOrganizationService from warehouse.organizations.models import OrganizationType @@ -55,6 +54,7 @@ def test_create_organization( user_service, organization_service, db_request, + enable_organizations, monkeypatch, ): pyramid_services.register_service(user_service, IUserService, None) @@ -79,11 +79,6 @@ def test_create_organization( ), } ) - monkeypatch.setattr( - db_request, - "flags", - pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), - ) send_email = pretend.call_recorder(lambda *a, **kw: None) monkeypatch.setattr( views, "send_admin_new_organization_requested_email", send_email @@ -95,9 +90,6 @@ def test_create_organization( db_request.POST["name"] ) - assert db_request.flags.enabled.calls == [ - pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS), - ] assert organization.name == db_request.POST["name"] assert organization.display_name == db_request.POST["display_name"] assert organization.orgtype == OrganizationType[db_request.POST["orgtype"]] diff --git a/tests/unit/api/test_billing.py b/tests/unit/api/test_billing.py new file mode 100644 --- /dev/null +++ b/tests/unit/api/test_billing.py @@ -0,0 +1,601 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import pretend +import pytest +import stripe + +from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent, HTTPNotFound + +from warehouse.api import billing + +from ...common.db.organizations import ( + OrganizationFactory, + OrganizationStripeCustomerFactory, + OrganizationStripeSubscriptionFactory, +) +from ...common.db.subscriptions import StripeCustomerFactory, StripeSubscriptionFactory + + +class TestHandleBillingWebhookEvent: + # checkout.session.completed + def test_handle_billing_webhook_event_checkout_complete_update( + self, db_request, subscription_service, monkeypatch, billing_service + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + event = { + "type": "checkout.session.completed", + "data": { + "object": { + "id": "cs_test_12345", + "customer": stripe_customer.customer_id, + "status": "complete", + "subscription": subscription.subscription_id, + }, + }, + } + + checkout_session = { + "id": "cs_test_12345", + "customer": { + "id": stripe_customer.customer_id, + "email": "[email protected]", + }, + "status": "complete", + "subscription": { + "id": subscription.subscription_id, + "items": { + "data": [{"id": "si_12345"}], + }, + }, + } + + get_checkout_session = pretend.call_recorder(lambda *a, **kw: checkout_session) + monkeypatch.setattr( + billing_service, "get_checkout_session", get_checkout_session + ) + + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_checkout_complete_add( + self, db_request, subscription_service, monkeypatch, billing_service + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + + event = { + "type": "checkout.session.completed", + "data": { + "object": { + "id": "cs_test_12345", + "customer": stripe_customer.customer_id, + "status": "complete", + "subscription": "sub_12345", + }, + }, + } + + checkout_session = { + "id": "cs_test_12345", + "customer": { + "id": stripe_customer.customer_id, + "email": "[email protected]", + }, + "status": "complete", + "subscription": { + "id": "sub_12345", + "items": { + "data": [{"id": "si_12345"}], + }, + }, + } + + get_checkout_session = pretend.call_recorder(lambda *a, **kw: checkout_session) + monkeypatch.setattr( + billing_service, "get_checkout_session", get_checkout_session + ) + + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_checkout_complete_invalid_status( + self, db_request + ): + event = { + "type": "checkout.session.completed", + "data": { + "object": { + "id": "cs_test_12345", + "customer": "cus_1234", + "status": "invalid_status", + "subscription": "sub_12345", + }, + }, + } + + with 
pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_checkout_complete_invalid_customer( + self, db_request, monkeypatch, billing_service + ): + event = { + "type": "checkout.session.completed", + "data": { + "object": { + "id": "cs_test_12345", + "customer": "", + "status": "complete", + "subscription": "sub_12345", + }, + }, + } + + checkout_session = { + "id": "cs_test_12345", + "customer": { + "id": "", + "email": "[email protected]", + }, + "status": "complete", + "subscription": { + "id": "sub_12345", + "items": { + "data": [{"id": "si_12345"}], + }, + }, + } + + get_checkout_session = pretend.call_recorder(lambda *a, **kw: checkout_session) + monkeypatch.setattr( + billing_service, "get_checkout_session", get_checkout_session + ) + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_checkout_complete_invalid_subscription( + self, db_request, monkeypatch, billing_service + ): + event = { + "type": "checkout.session.completed", + "data": { + "object": { + "id": "cs_test_12345", + "customer": "cus_1234", + "status": "complete", + "subscription": "", + }, + }, + } + + checkout_session = { + "id": "cs_test_12345", + "customer": { + "id": "cus_1234", + "email": "[email protected]", + }, + "status": "complete", + "subscription": { + "id": "", + }, + } + + get_checkout_session = pretend.call_recorder(lambda *a, **kw: checkout_session) + monkeypatch.setattr( + billing_service, "get_checkout_session", get_checkout_session + ) + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + # customer.subscription.deleted + def test_handle_billing_webhook_event_subscription_deleted_update( + self, db_request, subscription_service + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + event = { + "type": "customer.subscription.deleted", + "data": { + "object": { + "customer": stripe_customer.customer_id, + "status": "canceled", + "id": subscription.subscription_id, + }, + }, + } + + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_deleted_not_found( + self, db_request, subscription_service + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + + event = { + "type": "customer.subscription.deleted", + "data": { + "object": { + "customer": stripe_customer.customer_id, + "status": "canceled", + "id": "sub_12345", + }, + }, + } + + with pytest.raises(HTTPNotFound): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_deleted_invalid_status( + self, db_request + ): + event = { + "type": "customer.subscription.deleted", + "data": { + "object": { + "customer": "cus_1234", + "status": "invalid_status", + "id": "sub_12345", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def 
test_handle_billing_webhook_event_subscription_deleted_invalid_customer( + self, db_request + ): + event = { + "type": "customer.subscription.deleted", + "data": { + "object": { + "customer": "", + "status": "canceled", + "id": "sub_12345", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_deleted_invalid_subscription( + self, db_request + ): + event = { + "type": "customer.subscription.deleted", + "data": { + "object": { + "customer": "cus_1234", + "status": "canceled", + "id": "", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + # customer.subscription.updated + def test_handle_billing_webhook_event_subscription_updated_update( + self, db_request, subscription_service + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + event = { + "type": "customer.subscription.updated", + "data": { + "object": { + "customer": stripe_customer.customer_id, + "status": "canceled", + "id": subscription.subscription_id, + }, + }, + } + + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_updated_not_found( + self, db_request, subscription_service + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + + event = { + "type": "customer.subscription.updated", + "data": { + "object": { + "customer": stripe_customer.customer_id, + "status": "canceled", + "id": "sub_12345", + }, + }, + } + + with pytest.raises(HTTPNotFound): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_updated_no_change( + self, db_request + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + assert subscription.status == "active" + + event = { + "type": "customer.subscription.updated", + "data": { + "object": { + "customer": stripe_customer.customer_id, + "status": "active", + "id": subscription.subscription_id, + }, + }, + } + + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_updated_invalid_status( + self, db_request + ): + event = { + "type": "customer.subscription.updated", + "data": { + "object": { + "customer": "cus_1234", + "status": "invalid_status", + "id": "sub_12345", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_updated_invalid_customer( + self, db_request + ): + event = { + "type": "customer.subscription.updated", + "data": { + "object": { + "customer": "", + "status": "canceled", + "id": "sub_12345", + }, + }, + } + + with 
pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_subscription_updated_invalid_subscription( + self, db_request + ): + event = { + "type": "customer.subscription.updated", + "data": { + "object": { + "customer": "cus_1234", + "status": "canceled", + "id": "", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + # customer.deleted + def test_handle_billing_webhook_event_customer_deleted( + self, db_request, subscription_service + ): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + event = { + "type": "customer.deleted", + "data": { + "object": { + "id": stripe_customer.customer_id, + }, + }, + } + + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_customer_deleted_no_subscriptions( + self, db_request + ): + event = { + "type": "customer.deleted", + "data": { + "object": { + "id": "cus_12345", + }, + }, + } + + with pytest.raises(HTTPNotFound): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_customer_deleted_invalid_customer( + self, db_request + ): + event = { + "type": "customer.deleted", + "data": { + "object": { + "id": "", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_unmatched_event(self, db_request): + event = { + "type": "your.birthday", + "data": { + "object": { + "id": "day_1234", + }, + }, + } + + billing.handle_billing_webhook_event(db_request, event) + + # customer.updated + def test_handle_billing_webhook_event_customer_updated_email(self, db_request): + event = { + "type": "customer.updated", + "data": { + "object": { + "id": "cus_12345", + "email": "[email protected]", + }, + }, + } + + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_customer_updated_invalid_customer( + self, db_request + ): + event = { + "type": "customer.updated", + "data": { + "object": { + "id": "", + "email": "[email protected]", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + def test_handle_billing_webhook_event_no_billing_email(self, db_request): + event = { + "type": "customer.updated", + "data": { + "object": { + "id": "cus_12345", + "email": "", + }, + }, + } + + with pytest.raises(HTTPBadRequest): + billing.handle_billing_webhook_event(db_request, event) + + +class TestBillingWebhook: + def test_billing_webhook(self, pyramid_request, billing_service, monkeypatch): + pyramid_request.body = json.dumps({"type": "mock.webhook.payload"}) + pyramid_request.headers = {"Stripe-Signature": "mock-stripe-signature"} + + monkeypatch.setattr( + billing_service, + "webhook_received", + lambda p, s: json.loads(p), + ) + + monkeypatch.setattr( + billing, "handle_billing_webhook_event", lambda *a, **kw: None + ) + + result = billing.billing_webhook(pyramid_request) + + assert isinstance(result, HTTPNoContent) + + def test_billing_webhook_value_error( + self, pyramid_request, billing_service, monkeypatch + ): + 
pyramid_request.body = json.dumps({"type": "mock.webhook.payload"}) + pyramid_request.headers = {"Stripe-Signature": "mock-stripe-signature"} + + def webhook_received(payload, sig_header): + raise ValueError() + + monkeypatch.setattr(billing_service, "webhook_received", webhook_received) + + with pytest.raises(HTTPBadRequest): + billing.billing_webhook(pyramid_request) + + def test_billing_webhook_signature_error( + self, pyramid_request, billing_service, monkeypatch + ): + pyramid_request.body = json.dumps({"type": "mock.webhook.payload"}) + pyramid_request.headers = {"Stripe-Signature": "mock-stripe-signature"} + + def webhook_received(payload, sig_header): + raise stripe.error.SignatureVerificationError("signature error", sig_header) + + monkeypatch.setattr(billing_service, "webhook_received", webhook_received) + + with pytest.raises(HTTPBadRequest): + billing.billing_webhook(pyramid_request) diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -75,6 +75,8 @@ OrganizationInvitationFactory, OrganizationProjectFactory, OrganizationRoleFactory, + OrganizationStripeCustomerFactory, + OrganizationStripeSubscriptionFactory, TeamFactory, TeamProjectRoleFactory, TeamRoleFactory, @@ -89,6 +91,11 @@ RoleInvitationFactory, UserFactory, ) +from ...common.db.subscriptions import ( + StripeCustomerFactory, + StripeSubscriptionFactory, + StripeSubscriptionPriceFactory, +) class TestManageAccount: @@ -2375,7 +2382,7 @@ def test_default_response(self, monkeypatch): def test_manage_organizations(self, monkeypatch): request = pretend.stub( find_service=lambda *a, **kw: pretend.stub(), - flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), + flags=pretend.stub(enabled=pretend.call_recorder(lambda f: False)), ) default_response = {"default": "response"} @@ -2385,25 +2392,19 @@ def test_manage_organizations(self, monkeypatch): view = views.ManageOrganizationsViews(request) result = view.manage_organizations() - assert request.flags.enabled.calls == [ - pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS), - ] assert result == default_response - def test_manage_organizations_disable_organizations(self, monkeypatch): + def test_manage_organizations_disable_organizations(self): request = pretend.stub( find_service=lambda *a, **kw: pretend.stub(), - flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: True)), + flags=pretend.stub(enabled=pretend.call_recorder(lambda f: True)), ) view = views.ManageOrganizationsViews(request) with pytest.raises(HTTPNotFound): view.manage_organizations() - assert request.flags.enabled.calls == [ - pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS), - ] - def test_create_organization(self, monkeypatch): + def test_create_organization(self, enable_organizations, monkeypatch): admins = [] user_service = pretend.stub( get_admins=pretend.call_recorder(lambda *a, **kw: admins), @@ -2452,12 +2453,16 @@ def test_create_organization(self, monkeypatch): IUserService: user_service, IOrganizationService: organization_service, }[interface], - flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), + flags=pretend.stub(enabled=pretend.call_recorder(lambda f: False)), remote_addr="0.0.0.0", path="request-path", ) - create_organization_obj = pretend.stub(validate=lambda: True, data=request.POST) + create_organization_obj = pretend.stub( + data=request.POST, + orgtype=pretend.stub(data=request.POST["orgtype"]), + validate=lambda: True, + ) 
create_organization_cls = pretend.call_recorder( lambda *a, **kw: create_organization_obj ) @@ -2477,9 +2482,164 @@ def test_create_organization(self, monkeypatch): view = views.ManageOrganizationsViews(request) result = view.create_organization() - assert request.flags.enabled.calls == [ - pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS), + assert user_service.get_admins.calls == [pretend.call()] + assert organization_service.add_organization.calls == [ + pretend.call( + name=organization.name, + display_name=organization.display_name, + orgtype=organization.orgtype, + link_url=organization.link_url, + description=organization.description, + ) + ] + assert organization_service.add_catalog_entry.calls == [ + pretend.call(organization.id) + ] + assert organization_service.add_organization_role.calls == [ + pretend.call( + organization.id, + request.user.id, + OrganizationRoleType.Owner, + ) + ] + assert organization_service.record_event.calls == [ + pretend.call( + organization.id, + tag="organization:create", + additional={"created_by_user_id": str(request.user.id)}, + ), + pretend.call( + organization.id, + tag="organization:catalog_entry:add", + additional={"submitted_by_user_id": str(request.user.id)}, + ), + pretend.call( + organization.id, + tag="organization:organization_role:invite", + additional={ + "submitted_by_user_id": str(request.user.id), + "role_name": "Owner", + "target_user_id": str(request.user.id), + }, + ), + pretend.call( + organization.id, + tag="organization:organization_role:accepted", + additional={ + "submitted_by_user_id": str(request.user.id), + "role_name": "Owner", + "target_user_id": str(request.user.id), + }, + ), ] + assert user_service.record_event.calls == [ + pretend.call( + request.user.id, + tag="account:organization_role:accepted", + additional={ + "submitted_by_user_id": str(request.user.id), + "organization_name": organization.name, + "role_name": "Owner", + }, + ), + ] + assert send_email.calls == [ + pretend.call( + request, + admins, + organization_name=organization.name, + initiator_username=request.user.username, + organization_id=organization.id, + ), + pretend.call( + request, + request.user, + organization_name=organization.name, + ), + ] + assert isinstance(result, HTTPSeeOther) + + def test_create_organization_with_subscription( + self, enable_organizations, monkeypatch + ): + admins = [] + user_service = pretend.stub( + get_admins=pretend.call_recorder(lambda *a, **kw: admins), + record_event=pretend.call_recorder(lambda *a, **kw: None), + ) + + organization = pretend.stub( + id=pretend.stub(), + name="psf", + normalized_name="psf", + display_name="Python Software Foundation", + orgtype="Company", + link_url="https://www.python.org/psf/", + description=( + "To promote, protect, and advance the Python programming " + "language, and to support and facilitate the growth of a " + "diverse and international community of Python programmers" + ), + is_active=False, + is_approved=None, + ) + catalog_entry = pretend.stub() + role = pretend.stub() + organization_service = pretend.stub( + add_organization=pretend.call_recorder(lambda *a, **kw: organization), + add_catalog_entry=pretend.call_recorder(lambda *a, **kw: catalog_entry), + add_organization_role=pretend.call_recorder(lambda *a, **kw: role), + record_event=pretend.call_recorder(lambda *a, **kw: None), + ) + + request = pretend.stub( + POST={ + "name": organization.name, + "display_name": organization.display_name, + "orgtype": organization.orgtype, + "link_url": organization.link_url, + 
"description": organization.description, + }, + domain=pretend.stub(), + user=pretend.stub( + id=pretend.stub(), + username=pretend.stub(), + has_primary_verified_email=True, + ), + session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), + find_service=lambda interface, **kw: { + IUserService: user_service, + IOrganizationService: organization_service, + }[interface], + flags=pretend.stub(enabled=pretend.call_recorder(lambda f: False)), + remote_addr="0.0.0.0", + route_path=lambda *a, **kw: "manage-subscription-url", + ) + + create_organization_obj = pretend.stub( + data=request.POST, + orgtype=pretend.stub(data=request.POST["orgtype"]), + validate=lambda: True, + ) + create_organization_cls = pretend.call_recorder( + lambda *a, **kw: create_organization_obj + ) + monkeypatch.setattr(views, "CreateOrganizationForm", create_organization_cls) + + send_email = pretend.call_recorder(lambda *a, **kw: None) + monkeypatch.setattr( + views, "send_admin_new_organization_requested_email", send_email + ) + monkeypatch.setattr(views, "send_new_organization_requested_email", send_email) + + default_response = {"default": "response"} + monkeypatch.setattr( + views.ManageOrganizationsViews, "default_response", default_response + ) + + view = views.ManageOrganizationsViews(request) + result = view.create_organization() + assert user_service.get_admins.calls == [pretend.call()] assert organization_service.add_organization.calls == [ pretend.call( @@ -2556,6 +2716,7 @@ def test_create_organization(self, monkeypatch): ), ] assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "manage-subscription-url" def test_create_organization_validation_fails(self, monkeypatch): admins = [] @@ -2593,7 +2754,7 @@ def test_create_organization_validation_fails(self, monkeypatch): IUserService: user_service, IOrganizationService: organization_service, }[interface], - flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), + flags=pretend.stub(enabled=pretend.call_recorder(lambda f: False)), remote_addr="0.0.0.0", ) @@ -2614,9 +2775,6 @@ def test_create_organization_validation_fails(self, monkeypatch): view = views.ManageOrganizationsViews(request) result = view.create_organization() - assert request.flags.enabled.calls == [ - pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS), - ] assert user_service.get_admins.calls == [] assert organization_service.add_organization.calls == [] assert organization_service.add_catalog_entry.calls == [] @@ -2625,18 +2783,15 @@ def test_create_organization_validation_fails(self, monkeypatch): assert send_email.calls == [] assert result == {"create_organization_form": create_organization_obj} - def test_create_organizations_disable_organizations(self, monkeypatch): + def test_create_organization_disable_organizations(self): request = pretend.stub( find_service=lambda *a, **kw: pretend.stub(), - flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: True)), + flags=pretend.stub(enabled=pretend.call_recorder(lambda f: True)), ) view = views.ManageOrganizationsViews(request) with pytest.raises(HTTPNotFound): view.create_organization() - assert request.flags.enabled.calls == [ - pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS), - ] class TestManageOrganizationSettings: @@ -2684,13 +2839,6 @@ def test_manage_organization( ), ] - def test_manage_organization_disable_organizations(self, db_request): - organization = OrganizationFactory.create() - - view = views.ManageOrganizationSettingsViews(organization, db_request) - with 
pytest.raises(HTTPNotFound): - view.manage_organization() - def test_save_organization( self, db_request, organization_service, enable_organizations, monkeypatch ): @@ -2766,13 +2914,6 @@ def test_save_organization_validation_fails( } assert organization_service.update_organization.calls == [] - def test_save_organization_disable_organizations(self, db_request): - organization = OrganizationFactory.create() - - view = views.ManageOrganizationSettingsViews(organization, db_request) - with pytest.raises(HTTPNotFound): - view.save_organization() - def test_save_organization_name( self, db_request, @@ -2900,13 +3041,6 @@ def rename_organization(organization_id, organization_name): } assert organization_service.rename_organization.calls == [] - def test_save_organization_name_disable_organizations(self, db_request): - organization = OrganizationFactory.create(name="old-name") - - view = views.ManageOrganizationSettingsViews(organization, db_request) - with pytest.raises(HTTPNotFound): - view.save_organization_name() - def test_delete_organization( self, db_request, @@ -3009,12 +3143,301 @@ def test_delete_organization_with_active_projects( assert organization_service.delete_organization.calls == [] assert db_request.route_path.calls == [] - def test_delete_organization_disable_organizations(self, db_request): + def test_delete_organization_with_subscriptions( + self, + db_request, + pyramid_user, + organization_service, + user_service, + enable_organizations, + monkeypatch, + ): organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + db_request.POST = {"confirm_organization_name": organization.name} + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "/manage/organizations/" + ) + + monkeypatch.setattr( + organization_service, + "delete_organization", + pretend.call_recorder(lambda *a, **kw: None), + ) + + admins = [] + monkeypatch.setattr( + user_service, + "get_admins", + pretend.call_recorder(lambda *a, **kw: admins), + ) + + send_email = pretend.call_recorder(lambda *a, **kw: None) + monkeypatch.setattr(views, "send_admin_organization_deleted_email", send_email) + monkeypatch.setattr(views, "send_organization_deleted_email", send_email) + monkeypatch.setattr( + views, "organization_owners", lambda *a, **kw: [pyramid_user] + ) view = views.ManageOrganizationSettingsViews(organization, db_request) + result = view.delete_organization() + + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/manage/organizations/" + assert organization_service.delete_organization.calls == [ + pretend.call(organization.id) + ] + assert send_email.calls == [ + pretend.call( + db_request, + admins, + organization_name=organization.name, + ), + pretend.call( + db_request, + {pyramid_user}, + organization_name=organization.name, + ), + ] + assert db_request.route_path.calls == [pretend.call("manage.organizations")] + + +class TestManageOrganizationBillingViews: + @pytest.fixture + def organization(self): + organization = OrganizationFactory.create() + OrganizationStripeCustomerFactory.create(organization=organization) + return organization + + @pytest.fixture + def organization_no_customer(self): + return OrganizationFactory.create() + + 
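+    # The fixtures below chain together: the subscription hangs off the
+    # organization's Stripe customer, and organization_subscription ties that
+    # subscription back to the organization row.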
@pytest.fixture + def subscription(self, organization): + return StripeSubscriptionFactory.create( + stripe_customer_id=organization.customer.customer_id + ) + + @pytest.fixture + def organization_subscription(self, organization, subscription): + return OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + @pytest.fixture + def subscription_price(self): + return StripeSubscriptionPriceFactory.create() + + def test_customer_id( + self, + db_request, + subscription_service, + organization, + ): + billing_service = pretend.stub( + create_customer=lambda *a, **kw: {"id": organization.customer.customer_id}, + ) + + view = views.ManageOrganizationBillingViews(organization, db_request) + view.billing_service = billing_service + customer_id = view.customer_id + + assert customer_id == organization.customer.customer_id + + def test_customer_id_local_mock( + self, + db_request, + billing_service, + subscription_service, + organization_no_customer, + ): + db_request.registry.settings["site.name"] = "PyPI" + + view = views.ManageOrganizationBillingViews( + organization_no_customer, db_request + ) + customer_id = view.customer_id + + assert customer_id.startswith("mockcus_") + + def test_disable_organizations( + self, + db_request, + billing_service, + subscription_service, + organization, + ): + view = views.ManageOrganizationBillingViews(organization, db_request) + with pytest.raises(HTTPNotFound): - view.delete_organization() + view.create_or_manage_subscription() + + def test_activate_subscription( + self, + db_request, + organization, + enable_organizations, + ): + view = views.ManageOrganizationBillingViews(organization, db_request) + result = view.activate_subscription() + + assert result == {"organization": organization} + + def test_create_subscription( + self, + db_request, + subscription_service, + organization, + subscription_price, + enable_organizations, + monkeypatch, + ): + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "mock-session-url" + ) + + # Stub for billing service is not instance of MockStripeBillingService. + create_checkout_session = pretend.call_recorder( + lambda *a, **kw: {"url": "session-url"} + ) + + billing_service = pretend.stub( + create_checkout_session=create_checkout_session, + create_customer=lambda *a, **kw: {"id": organization.customer.customer_id}, + sync_price=lambda *a, **kw: None, + sync_product=lambda *a, **kw: None, + ) + + view = views.ManageOrganizationBillingViews(organization, db_request) + view.billing_service = billing_service + result = view.create_or_manage_subscription() + + assert create_checkout_session.calls == [ + pretend.call( + customer_id=organization.customer.customer_id, + price_ids=[subscription_price.price_id], + success_url=view.return_url, + cancel_url=view.return_url, + ), + ] + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "session-url" + + def test_create_subscription_local_mock( + self, + db_request, + billing_service, + subscription_service, + organization, + subscription_price, + enable_organizations, + monkeypatch, + ): + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "mock-session-url" + ) + + # Fixture for billing service is instance of MockStripeBillingService. 
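+        # With the mock service, the view is expected to redirect to the
+        # local mock checkout route built by route_path rather than the
+        # stubbed checkout session URL, as asserted below.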
+ create_checkout_session = pretend.call_recorder( + lambda *a, **kw: {"url": "session-url"} + ) + monkeypatch.setattr( + billing_service, "create_checkout_session", create_checkout_session + ) + + view = views.ManageOrganizationBillingViews(organization, db_request) + result = view.create_or_manage_subscription() + + assert create_checkout_session.calls == [ + pretend.call( + customer_id=view.customer_id, + price_ids=[subscription_price.price_id], + success_url=view.return_url, + cancel_url=view.return_url, + ), + ] + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "mock-session-url" + + def test_manage_subscription( + self, + db_request, + billing_service, + subscription_service, + organization, + organization_subscription, + enable_organizations, + monkeypatch, + ): + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "mock-session-url" + ) + + # Stub for billing service is not instance of MockStripeBillingService. + create_portal_session = pretend.call_recorder( + lambda *a, **kw: {"url": "session-url"} + ) + billing_service = pretend.stub( + create_portal_session=create_portal_session, + sync_price=lambda *a, **kw: None, + sync_product=lambda *a, **kw: None, + ) + + view = views.ManageOrganizationBillingViews(organization, db_request) + view.billing_service = billing_service + result = view.create_or_manage_subscription() + + assert create_portal_session.calls == [ + pretend.call( + customer_id=organization.customer.customer_id, + return_url=view.return_url, + ), + ] + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "session-url" + + def test_manage_subscription_local_mock( + self, + db_request, + billing_service, + subscription_service, + organization, + organization_subscription, + enable_organizations, + monkeypatch, + ): + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "mock-session-url" + ) + + # Fixture for billing service is instance of MockStripeBillingService. 
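+        # As above: with the mock service in play, the view redirects to the
+        # local mock portal route from route_path, not the stubbed portal
+        # session URL.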
+ create_portal_session = pretend.call_recorder( + lambda *a, **kw: {"url": "session-url"} + ) + monkeypatch.setattr( + billing_service, "create_portal_session", create_portal_session + ) + + view = views.ManageOrganizationBillingViews(organization, db_request) + result = view.create_or_manage_subscription() + + assert create_portal_session.calls == [ + pretend.call( + customer_id=organization.customer.customer_id, + return_url=view.return_url, + ), + ] + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "mock-session-url" class TestManageOrganizationTeams: @@ -3173,13 +3596,6 @@ def test_manage_organization_projects( } assert len(add_organization_project_cls.calls) == 1 - def test_manage_organization_projects_disable_organizations(self, db_request): - organization = OrganizationFactory.create() - - view = views.ManageOrganizationProjectsViews(organization, db_request) - with pytest.raises(HTTPNotFound): - view.manage_organization_projects() - def test_add_organization_project_existing_project( self, db_request, @@ -3548,13 +3964,6 @@ def test_add_organization_project_new_project_name_conflict( ] assert len(organization.projects) == 1 - def test_add_organization_project_disable_organizations(self, db_request): - organization = OrganizationFactory.create() - - view = views.ManageOrganizationProjectsViews(organization, db_request) - with pytest.raises(HTTPNotFound): - view.add_organization_project() - class TestManageOrganizationRoles: def test_get_manage_organization_roles(self, db_request, enable_organizations): @@ -3574,12 +3983,6 @@ def form_class(*a, **kw): "form": form_obj, } - def test_get_manage_organization_roles_disable_organizations(self, db_request): - organization = OrganizationFactory.create(name="foobar") - - with pytest.raises(HTTPNotFound): - views.manage_organization_roles(organization, db_request) - @freeze_time(datetime.datetime.utcnow()) @pytest.mark.parametrize("orgtype", list(OrganizationType)) def test_post_new_organization_role( @@ -4466,13 +4869,6 @@ def test_manage_team( "save_team_form": form, } - def test_manage_team_disable_organizations(self, db_request): - team = TeamFactory.create() - - view = views.ManageTeamSettingsViews(team, db_request) - with pytest.raises(HTTPNotFound): - view.manage_team() - def test_save_team(self, db_request, organization_service, enable_organizations): team = TeamFactory.create(name="Team Name") db_request.POST = MultiDict({"name": "New Team Name"}) @@ -4504,13 +4900,6 @@ def test_save_team_validation_fails( ("This team name has already been used. 
" "Choose a different team name.") ] - def test_save_team_disable_organizations(self, db_request): - team = TeamFactory.create() - - view = views.ManageTeamSettingsViews(team, db_request) - with pytest.raises(HTTPNotFound): - view.save_team() - def test_delete_team( self, db_request, @@ -4595,13 +4984,6 @@ def test_delete_team_wrong_confirm( ) ] - def test_delete_organization_disable_organizations(self, db_request): - team = TeamFactory.create() - - view = views.ManageTeamSettingsViews(team, db_request) - with pytest.raises(HTTPNotFound): - view.delete_team() - class TestManageTeamProjects: def test_manage_team_projects( @@ -4632,13 +5014,6 @@ def test_manage_team_projects( "projects_requiring_2fa": set(), } - def test_manage_team_projects_disable_teams(self, db_request): - team = TeamFactory.create() - - view = views.ManageTeamProjectsViews(team, db_request) - with pytest.raises(HTTPNotFound): - view.manage_team_projects() - class TestManageTeamRoles: def test_manage_team_roles( @@ -4662,13 +5037,6 @@ def test_manage_team_roles( "form": form, } - def test_manage_team_roles_disable_organizations(self, db_request): - team = TeamFactory.create() - - view = views.ManageTeamRolesViews(team, db_request) - with pytest.raises(HTTPNotFound): - view.manage_team_roles() - def test_create_team_role( self, db_request, @@ -4847,6 +5215,7 @@ def test_create_team_role_not_a_member( "roles": [], "form": form, } + assert form.username.errors == [ ( "No organization owner, manager, or member found " @@ -4854,13 +5223,6 @@ def test_create_team_role_not_a_member( ) ] - def test_create_team_role_disable_organizations(self, db_request): - team = TeamFactory.create() - - view = views.ManageTeamRolesViews(team, db_request) - with pytest.raises(HTTPNotFound): - view.create_team_role() - def test_delete_team_role( self, db_request, @@ -5046,13 +5408,6 @@ def test_delete_team_role_not_a_manager( ] assert isinstance(result, HTTPSeeOther) - def test_delete_team_role_disable_organizations(self, db_request): - team = TeamFactory.create() - - view = views.ManageTeamRolesViews(team, db_request) - with pytest.raises(HTTPNotFound): - view.delete_team_role() - class TestManageProjects: def test_manage_projects(self, db_request): diff --git a/tests/unit/mock/__init__.py b/tests/unit/mock/__init__.py new file mode 100644 --- /dev/null +++ b/tests/unit/mock/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/unit/mock/test_billing.py b/tests/unit/mock/test_billing.py new file mode 100644 --- /dev/null +++ b/tests/unit/mock/test_billing.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther + +from warehouse.mock import billing + +from ...common.db.organizations import OrganizationFactory + + +class TestMockBillingViews: + @pytest.fixture + def organization(self): + return OrganizationFactory.create() + + def test_disable_organizations(self, db_request, organization): + with pytest.raises(HTTPNotFound): + billing.MockBillingViews(organization, db_request) + + def test_mock_checkout_session( + self, db_request, organization, enable_organizations + ): + view = billing.MockBillingViews(organization, db_request) + result = view.mock_checkout_session() + + assert result == {"organization": organization} + + def test_mock_portal_session(self, db_request, organization, enable_organizations): + view = billing.MockBillingViews(organization, db_request) + result = view.mock_portal_session() + + assert result == {"organization": organization} + + def test_mock_trigger_checkout_session_completed( + self, db_request, organization, enable_organizations, monkeypatch + ): + monkeypatch.setattr( + db_request, + "route_path", + lambda *a, **kw: "/manage/organizations/", + ) + monkeypatch.setattr( + billing, + "handle_billing_webhook_event", + lambda *a, **kw: None, + ) + + view = billing.MockBillingViews(organization, db_request) + result = view.mock_trigger_checkout_session_completed() + + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/manage/organizations/" diff --git a/tests/unit/organizations/test_init.py b/tests/unit/organizations/test_init.py --- a/tests/unit/organizations/test_init.py +++ b/tests/unit/organizations/test_init.py @@ -20,6 +20,7 @@ from warehouse.organizations.tasks import ( delete_declined_organizations, update_organization_invitation_status, + update_organziation_subscription_usage_record, ) @@ -40,4 +41,7 @@ def test_includeme(): assert config.add_periodic_task.calls == [ pretend.call(crontab(minute="*/5"), update_organization_invitation_status), pretend.call(crontab(minute=0, hour=0), delete_declined_organizations), + pretend.call( + crontab(minute=0, hour=0), update_organziation_subscription_usage_record + ), ] diff --git a/tests/unit/organizations/test_models.py b/tests/unit/organizations/test_models.py --- a/tests/unit/organizations/test_models.py +++ b/tests/unit/organizations/test_models.py @@ -27,8 +27,14 @@ OrganizationFactory as DBOrganizationFactory, OrganizationNameCatalogFactory as DBOrganizationNameCatalogFactory, OrganizationRoleFactory as DBOrganizationRoleFactory, + OrganizationStripeCustomerFactory as DBOrganizationStripeCustomerFactory, + OrganizationStripeSubscriptionFactory as DBOrganizationStripeSubscriptionFactory, TeamFactory as DBTeamFactory, ) +from ...common.db.subscriptions import ( + StripeCustomerFactory as DBStripeCustomerFactory, + StripeSubscriptionFactory as DBStripeSubscriptionFactory, +) class TestOrganizationFactory: @@ -289,3 +295,30 @@ def test_acl(self, db_session): ], key=lambda x: x[1], ) + + def test_active_subscription(self, db_session): + organization = DBOrganizationFactory.create() + stripe_customer = 
DBStripeCustomerFactory.create() + DBOrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = DBStripeSubscriptionFactory.create(customer=stripe_customer) + DBOrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + assert organization.active_subscription is not None + + def test_active_subscription_none(self, db_session): + organization = DBOrganizationFactory.create() + stripe_customer = DBStripeCustomerFactory.create() + DBOrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = DBStripeSubscriptionFactory.create( + customer=stripe_customer, + status="canceled", + ) + DBOrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + assert organization.active_subscription is None diff --git a/tests/unit/organizations/test_services.py b/tests/unit/organizations/test_services.py --- a/tests/unit/organizations/test_services.py +++ b/tests/unit/organizations/test_services.py @@ -22,23 +22,29 @@ OrganizationProject, OrganizationRole, OrganizationRoleType, + OrganizationStripeCustomer, + OrganizationStripeSubscription, OrganizationType, Team, TeamProjectRole, TeamRole, ) +from warehouse.subscriptions.models import StripeSubscription from ...common.db.organizations import ( OrganizationFactory, OrganizationInvitationFactory, OrganizationProjectFactory, OrganizationRoleFactory, + OrganizationStripeCustomerFactory, + OrganizationStripeSubscriptionFactory, TeamFactory, TeamProjectRoleFactory, TeamRoleFactory, UserFactory, ) from ...common.db.packaging import ProjectFactory +from ...common.db.subscriptions import StripeCustomerFactory, StripeSubscriptionFactory def test_database_organizations_factory(): @@ -320,7 +326,14 @@ def test_decline_organization(self, organization_service): def test_delete_organization(self, organization_service, db_request): organization = OrganizationFactory.create() - TeamFactory.create(organization=organization) + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) TeamFactory.create(organization=organization) organization_service.delete_organization(organization.id) @@ -353,6 +366,27 @@ def test_delete_organization(self, organization_service, db_request): .count() ) ) + assert not ( + ( + db_request.db.query(OrganizationStripeSubscription) + .filter_by(organization=organization, subscription=subscription) + .count() + ) + ) + assert not ( + ( + db_request.db.query(OrganizationStripeCustomer) + .filter_by(organization=organization, customer=stripe_customer) + .count() + ) + ) + assert not ( + ( + db_request.db.query(StripeSubscription) + .filter(StripeSubscription.id == subscription.id) + .count() + ) + ) assert not ( (db_request.db.query(Team).filter_by(organization=organization).count()) ) @@ -443,6 +477,75 @@ def test_delete_organization_project(self, organization_service, db_request): .count() ) + def test_add_organization_subscription(self, organization_service, db_request): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + 
subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + + organization_service.add_organization_subscription( + organization.id, subscription.id + ) + assert ( + db_request.db.query(OrganizationStripeSubscription) + .filter( + OrganizationStripeSubscription.organization_id == organization.id, + OrganizationStripeSubscription.subscription_id == subscription.id, + ) + .count() + ) + + def test_delete_organization_subscription(self, organization_service, db_request): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + organization_service.delete_organization_subscription( + organization.id, subscription.id + ) + assert not ( + db_request.db.query(OrganizationStripeSubscription) + .filter( + OrganizationStripeSubscription.organization_id == organization.id, + OrganizationStripeSubscription.subscription_id == subscription.id, + ) + .count() + ) + + def test_get_organization_stripe_customer(self, organization_service): + organization = OrganizationFactory.create() + organization_stripe_customer = OrganizationStripeCustomerFactory.create( + organization=organization + ) + + assert ( + organization_service.get_organization_stripe_customer(organization.id) + == organization_stripe_customer + ) + + def test_add_organization_stripe_customer(self, organization_service, db_request): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + organization_service.add_organization_stripe_customer( + organization.id, stripe_customer.id + ) + assert ( + db_request.db.query(OrganizationStripeCustomer) + .filter( + OrganizationStripeCustomer.organization_id == organization.id, + OrganizationStripeCustomer.stripe_customer_id == stripe_customer.id, + ) + .count() + ) + def test_get_teams_by_organization(self, organization_service): organization = OrganizationFactory.create() diff --git a/tests/unit/organizations/test_tasks.py b/tests/unit/organizations/test_tasks.py --- a/tests/unit/organizations/test_tasks.py +++ b/tests/unit/organizations/test_tasks.py @@ -15,17 +15,33 @@ import pretend from warehouse.accounts.interfaces import ITokenService, TokenExpired -from warehouse.organizations.models import Organization, OrganizationInvitationStatus +from warehouse.organizations.models import ( + Organization, + OrganizationInvitationStatus, + OrganizationRoleType, +) from warehouse.organizations.tasks import ( delete_declined_organizations, update_organization_invitation_status, + update_organziation_subscription_usage_record, ) +from warehouse.subscriptions.interfaces import IBillingService from ...common.db.organizations import ( OrganizationFactory, OrganizationInvitationFactory, + OrganizationRoleFactory, + OrganizationStripeCustomerFactory, + OrganizationStripeSubscriptionFactory, UserFactory, ) +from ...common.db.subscriptions import ( + StripeCustomerFactory, + StripeSubscriptionFactory, + StripeSubscriptionItemFactory, + StripeSubscriptionPriceFactory, + StripeSubscriptionProductFactory, +) class TestUpdateInvitationStatus: @@ -109,3 +125,59 @@ def test_delete_declined_organizations(self, db_request): ) assert db_request.db.query(Organization).count() == 1 + + +class TestUpdateOrganizationSubscriptionUsage: + def 
test_update_organization_subscription_usage_record(self, db_request):
+        # Create an organization with a subscription and members
+        organization = OrganizationFactory.create()
+        # Add a couple members
+        owner_user = UserFactory.create()
+        OrganizationRoleFactory(
+            organization=organization,
+            user=owner_user,
+            role_name=OrganizationRoleType.Owner,
+        )
+        member_user = UserFactory.create()
+        OrganizationRoleFactory(
+            organization=organization,
+            user=member_user,
+            role_name=OrganizationRoleType.Member,
+        )
+        # Wire up the customer, subscription, organization, and subscription item
+        stripe_customer = StripeCustomerFactory.create()
+        OrganizationStripeCustomerFactory.create(
+            organization=organization, customer=stripe_customer
+        )
+        subscription_product = StripeSubscriptionProductFactory.create()
+        subscription_price = StripeSubscriptionPriceFactory.create(
+            subscription_product=subscription_product
+        )
+        subscription = StripeSubscriptionFactory.create(
+            customer=stripe_customer,
+            subscription_price=subscription_price,
+        )
+        OrganizationStripeSubscriptionFactory.create(
+            organization=organization, subscription=subscription
+        )
+        StripeSubscriptionItemFactory.create(subscription=subscription)
+
+        create_or_update_usage_record = pretend.call_recorder(
+            lambda *a, **kw: {
+                "subscription_item_id": "si_1234",
+                "organization_member_count": "5",
+            }
+        )
+        billing_service = pretend.stub(
+            create_or_update_usage_record=create_or_update_usage_record,
+        )
+
+        db_request.find_service = pretend.call_recorder(
+            lambda *a, **kw: billing_service
+        )
+
+        update_organziation_subscription_usage_record(db_request)
+
+        assert db_request.find_service.calls == [
+            pretend.call(IBillingService, context=None)
+        ]
diff --git a/tests/unit/subscriptions/__init__.py b/tests/unit/subscriptions/__init__.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/subscriptions/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/unit/subscriptions/test_init.py b/tests/unit/subscriptions/test_init.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/subscriptions/test_init.py
@@ -0,0 +1,45 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import pretend + +from warehouse import subscriptions +from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService +from warehouse.subscriptions.services import subscription_factory + + +def test_includeme(): + billing_class = pretend.stub( + create_service=pretend.call_recorder(lambda *a, **kw: pretend.stub()) + ) + + settings = dict() + + config = pretend.stub( + maybe_dotted=lambda dotted: billing_class, + register_service_factory=pretend.call_recorder( + lambda factory, iface, name=None: None + ), + registry=pretend.stub( + settings={ + "billing.backend": "hand.some", + } + ), + get_settings=lambda: settings, + ) + + subscriptions.includeme(config) + + assert config.register_service_factory.calls == [ + pretend.call(subscription_factory, ISubscriptionService), + pretend.call(billing_class.create_service, IBillingService), + ] diff --git a/tests/unit/subscriptions/test_models.py b/tests/unit/subscriptions/test_models.py new file mode 100644 --- /dev/null +++ b/tests/unit/subscriptions/test_models.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from warehouse.subscriptions.models import StripeSubscriptionStatus + +from ...common.db.organizations import ( + OrganizationFactory as DBOrganizationFactory, + OrganizationStripeCustomerFactory as DBOrganizationStripeCustomerFactory, +) +from ...common.db.subscriptions import ( + StripeCustomerFactory as DBStripeCustomerFactory, + StripeSubscriptionFactory as DBStripeSubscriptionFactory, +) + + +class TestStripeSubscription: + def test_is_restricted(self, db_session): + organization = DBOrganizationFactory.create() + stripe_customer = DBStripeCustomerFactory.create() + DBOrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = DBStripeSubscriptionFactory.create( + customer=stripe_customer, + status="past_due", + ) + assert subscription.is_restricted + + def test_not_is_restricted(self, db_session): + organization = DBOrganizationFactory.create() + stripe_customer = DBStripeCustomerFactory.create() + DBOrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = DBStripeSubscriptionFactory.create(customer=stripe_customer) + assert not subscription.is_restricted + + +class TestStripeSubscriptionStatus: + def test_has_value(self, db_session): + organization = DBOrganizationFactory.create() + stripe_customer = DBStripeCustomerFactory.create() + DBOrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = DBStripeSubscriptionFactory.create(customer=stripe_customer) + assert StripeSubscriptionStatus.has_value(subscription.status) + assert not StripeSubscriptionStatus.has_value("invalid_status") diff --git a/tests/unit/subscriptions/test_services.py b/tests/unit/subscriptions/test_services.py new file mode 100644 --- /dev/null +++ b/tests/unit/subscriptions/test_services.py @@ -0,0 +1,744 @@ +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib + +import pretend +import pytest +import stripe + +from zope.interface.verify import verifyClass + +from warehouse.organizations.models import ( + OrganizationStripeCustomer, + OrganizationStripeSubscription, +) +from warehouse.subscriptions import services +from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService +from warehouse.subscriptions.models import ( + StripeSubscriptionPrice, + StripeSubscriptionPriceInterval, + StripeSubscriptionStatus, +) +from warehouse.subscriptions.services import ( + GenericBillingService, + MockStripeBillingService, + StripeBillingService, +) + +from ...common.db.organizations import ( + OrganizationFactory, + OrganizationStripeCustomerFactory, + OrganizationStripeSubscriptionFactory, +) +from ...common.db.subscriptions import ( + StripeCustomerFactory, + StripeSubscriptionFactory, + StripeSubscriptionPriceFactory, + StripeSubscriptionProductFactory, +) + + +class TestStripeBillingService: + def test_verify_service(self): + assert verifyClass(IBillingService, StripeBillingService) + + def test_basic_init(self): + api = pretend.stub() + + billing_service = StripeBillingService( + api=api, + publishable_key="secret_to_everybody", + webhook_secret="keep_it_secret_keep_it_safe", + ) + + assert billing_service.api is api + assert billing_service.publishable_key == "secret_to_everybody" + assert billing_service.webhook_secret == "keep_it_secret_keep_it_safe" + + def test_create_service(self): + # Reload stripe to reset the global stripe.api_key to default. 
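+        # (create_service assigns module-level attributes such as
+        # stripe.api_key, so state can leak between tests without a reload.)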
+ importlib.reload(stripe) + + request = pretend.stub( + registry=pretend.stub( + settings={ + "billing.api_base": "http://localhost:12111", + "billing.api_version": "2020-08-27", + "billing.secret_key": "sk_test_123", + "billing.publishable_key": "pk_test_123", + "billing.webhook_key": "whsec_123", + } + ) + ) + billing_service = StripeBillingService.create_service(None, request) + # Assert api_base isn't overwritten with mock service even if we try + assert not billing_service.api.api_base == "http://localhost:12111" + assert billing_service.api.api_version == "2020-08-27" + assert billing_service.api.api_key == "sk_test_123" + assert billing_service.publishable_key == "pk_test_123" + assert billing_service.webhook_secret == "whsec_123" + + +class TestMockStripeBillingService: + def test_verify_service(self): + assert verifyClass(IBillingService, MockStripeBillingService) + + def test_basic_init(self): + api = pretend.stub() + + billing_service = MockStripeBillingService( + api=api, + publishable_key="secret_to_everybody", + webhook_secret="keep_it_secret_keep_it_safe", + ) + + assert billing_service.api is api + assert billing_service.publishable_key == "secret_to_everybody" + assert billing_service.webhook_secret == "keep_it_secret_keep_it_safe" + + def test_create_service(self): + request = pretend.stub( + registry=pretend.stub( + settings={ + "billing.api_base": "http://localhost:12111", + "billing.api_version": "2020-08-27", + "billing.secret_key": "sk_test_123", + "billing.publishable_key": "pk_test_123", + "billing.webhook_key": "whsec_123", + } + ) + ) + billing_service = MockStripeBillingService.create_service(None, request) + assert billing_service.api.api_base == "http://localhost:12111" + assert billing_service.api.api_version == "2020-08-27" + assert billing_service.api.api_key == "sk_test_123" + assert billing_service.publishable_key == "pk_test_123" + assert billing_service.webhook_secret == "whsec_123" + + def test_get_checkout_session(self, billing_service): + random_session = billing_service.api.checkout.Session.list(limit=1) + + assert random_session.data[0].object == "checkout.session" + + retrieved_session = billing_service.get_checkout_session( + random_session.data[0].id + ) + + assert retrieved_session.id == random_session.data[0].id + + def test_get_customer(self, billing_service, subscription_service): + customer = billing_service.get_customer(subscription_id="sub_12345") + + assert customer is not None + assert customer["id"] + + def test_create_customer(self, billing_service, organization_service): + organization = OrganizationFactory.create() + + customer = billing_service.create_customer( + name=organization.name, + description=organization.description, + ) + + assert customer is not None + assert customer["id"] + + def test_create_checkout_session(self, billing_service, subscription_service): + subscription_price = StripeSubscriptionPriceFactory.create() + success_url = "http://what.ever" + cancel_url = "http://no.way" + + checkout_session = billing_service.create_checkout_session( + customer_id="cus_123", + price_ids=[subscription_price.price_id], + success_url=success_url, + cancel_url=cancel_url, + ) + + assert checkout_session.id is not None + + def test_create_portal_session(self, billing_service): + return_url = "http://return.url" + + session_url = billing_service.create_portal_session( + customer_id="cus_123", + return_url=return_url, + ) + assert session_url is not None + + def test_webhook_received(self, billing_service, monkeypatch): + payload = 
pretend.stub() + sig_header = pretend.stub() + + construct_event = pretend.call_recorder(lambda *a, **kw: None) + monkeypatch.setattr(stripe.Webhook, "construct_event", construct_event) + + billing_service.webhook_received(payload, sig_header) + + assert construct_event.calls == [ + pretend.call(payload, sig_header, billing_service.webhook_secret), + ] + + def test_create_or_update_product( + self, billing_service, subscription_service, monkeypatch + ): + subscription_product = StripeSubscriptionProductFactory.create() + + search_products = pretend.call_recorder( + lambda *a, **kw: { + "data": [ + { + "id": str(subscription_product.id), + "name": subscription_product.product_name, + "created": 0, + }, + ], + } + ) + monkeypatch.setattr(billing_service, "search_products", search_products) + + product = billing_service.create_or_update_product( + name=subscription_product.product_name, + description=subscription_product.description, + tax_code=subscription_product.tax_code, + unit_label="user", + ) + + assert product is not None + + def test_create_or_update_product_new_product(self, billing_service, monkeypatch): + search_products = pretend.call_recorder(lambda *a, **kw: {"data": []}) + monkeypatch.setattr(billing_service, "search_products", search_products) + + product = billing_service.create_or_update_product( + name="Vitamin PyPI", + description="Take two and call me in the morning.", + tax_code="txcd_10103001", # "Software as a service (SaaS) - business use" + unit_label="user", + ) + + assert product is not None + + def test_create_product(self, billing_service, subscription_service): + subscription_product = StripeSubscriptionProductFactory.create() + + product = billing_service.create_product( + name=subscription_product.product_name, + description=subscription_product.description, + tax_code=subscription_product.tax_code, + unit_label="user", + ) + + assert product is not None + + def test_retrieve_product(self, billing_service, subscription_service): + subscription_product = StripeSubscriptionProductFactory.create() + + product = billing_service.retrieve_product( + product_id=subscription_product.product_id, + ) + + assert product is not None + + def test_update_product(self, billing_service, subscription_service): + subscription_product = StripeSubscriptionProductFactory.create() + + product = billing_service.update_product( + product_id=subscription_product.product_id, + name=subscription_product.product_name, + description=subscription_product.description, + tax_code=subscription_product.tax_code, + unit_label="user", + ) + + # stripe-mock has no persistence so we can't really check if we're + # updating the object or not, so just make sure we got one back + assert product is not None + + def test_list_all_products(self, billing_service): + products = billing_service.list_all_products() + + assert products is not None + + def test_delete_product(self, billing_service, subscription_service): + subscription_product = StripeSubscriptionProductFactory.create() + + product = billing_service.delete_product( + product_id=subscription_product.product_id + ) + assert product.deleted + + def test_search_products(self, billing_service): + products = billing_service.search_products(query="active:'true'") + + assert products is not None + + def test_create_price(self, billing_service, subscription_service): + subscription_price = StripeSubscriptionPriceFactory.create() + + price = billing_service.create_price( + unit_amount=subscription_price.unit_amount, + 
currency=subscription_price.currency, + product_id=subscription_price.subscription_product.id, + tax_behavior=subscription_price.tax_behavior, + ) + + assert price is not None + + def test_retrieve_price(self, billing_service, subscription_service): + subscription_price = StripeSubscriptionPriceFactory.create() + + price = billing_service.retrieve_price( + price_id=subscription_price.price_id, + ) + + assert price is not None + + def test_update_price(self, billing_service, subscription_service): + subscription_price = StripeSubscriptionPriceFactory.create() + + price = billing_service.update_price( + price_id=subscription_price.price_id, + active="false", + ) + + assert not price.active + + def test_list_all_prices(self, billing_service): + prices = billing_service.list_all_prices() + + assert prices is not None + + def test_search_prices(self, billing_service): + prices = billing_service.search_prices(query="active:'true'") + + assert prices is not None + + def test_create_or_update_price( + self, billing_service, subscription_service, monkeypatch + ): + subscription_price = StripeSubscriptionPriceFactory.create() + price = { + "id": "price_1", + "unit_amount": subscription_price.unit_amount, + "currency": subscription_price.currency, + "recurring": { + "interval": "month", + "usage_type": "metered", + "aggregate_usage": "max", + }, + "product_id": subscription_price.subscription_product.id, + "tax_behavior": subscription_price.tax_behavior, + "created": 1, + } + other = { + "id": "price_0", + "unit_amount": subscription_price.unit_amount, + "currency": subscription_price.currency, + "recurring": { + "interval": "month", + "usage_type": "metered", + "aggregate_usage": "max", + }, + "product_id": subscription_price.subscription_product.id, + "tax_behavior": subscription_price.tax_behavior, + "created": 0, + } + monkeypatch.setattr( + billing_service, "search_prices", lambda *a, **kw: {"data": [price, other]} + ) + + price = billing_service.create_or_update_price( + unit_amount=subscription_price.unit_amount, + currency=subscription_price.currency, + product_id=subscription_price.subscription_product.id, + tax_behavior=subscription_price.tax_behavior, + ) + + assert price["id"] == "price_1" + + def test_cancel_subscription(self, billing_service, subscription_service): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + db_subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + + subscription = billing_service.cancel_subscription( + subscription_id=db_subscription.subscription_id + ) + + # I would check to ensure the status is Canceled but mock stripe + # doesn't care enough to update the status for whatever reason ¯\_(ツ)_/¯ + assert subscription.status is not None + + def test_create_or_update_usage_record(self, billing_service, subscription_service): + result = billing_service.create_or_update_usage_record("si_1234", 5) + + # Ensure we got a record back with the subscription_item and quantity + assert result.id + assert result.subscription_item == "si_1234" + assert result.quantity == 5 + + +class TestGenericBillingService: + def test_basic_init(self): + api = pretend.stub() + + billing_service = GenericBillingService( + api=api, + publishable_key="secret_to_everybody", + webhook_secret="keep_it_secret_keep_it_safe", + ) + + assert billing_service.api is api + assert billing_service.publishable_key == "secret_to_everybody" 
+ assert billing_service.webhook_secret == "keep_it_secret_keep_it_safe" + + def test_notimplementederror(self): + with pytest.raises(NotImplementedError): + GenericBillingService.create_service(pretend.stub(), pretend.stub()) + + +def test_subscription_factory(): + db = pretend.stub() + context = pretend.stub() + request = pretend.stub(db=db) + + service = services.subscription_factory(context, request) + assert service.db is db + + +class TestStripeSubscriptionService: + def test_verify_service(self): + assert verifyClass(ISubscriptionService, services.StripeSubscriptionService) + + def test_service_creation(self, remote_addr): + session = pretend.stub() + service = services.StripeSubscriptionService(session) + + assert service.db is session + + def test_find_subscriptionid_nonexistent_sub(self, subscription_service): + assert subscription_service.find_subscriptionid("fake_news") is None + + def test_find_subscriptionid(self, subscription_service): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + + assert ( + subscription_service.find_subscriptionid(subscription.subscription_id) + == subscription.id + ) + + def test_add_subscription(self, billing_service, subscription_service): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + + new_subscription = subscription_service.add_subscription( + customer_id=stripe_customer.customer_id, + subscription_id="sub_12345", + subscription_item_id="si_12345", + billing_email="[email protected]", + ) + + subscription_service.db.flush() + + subscription_from_db = subscription_service.get_subscription( + new_subscription.id + ) + + assert ( + subscription_from_db.customer.customer_id + == new_subscription.customer.customer_id + ) + assert subscription_from_db.subscription_id == new_subscription.subscription_id + assert ( + subscription_from_db.subscription_price_id + == new_subscription.subscription_price_id + ) + assert subscription_from_db.status == StripeSubscriptionStatus.Active.value + assert stripe_customer.billing_email == "[email protected]" + + def test_update_subscription_status(self, subscription_service, db_request): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + + assert subscription.status == StripeSubscriptionStatus.Active.value + + subscription_service.update_subscription_status( + subscription.id, + status=StripeSubscriptionStatus.Active.value, + ) + + assert subscription.status == StripeSubscriptionStatus.Active.value + + def test_delete_subscription(self, subscription_service, db_request): + organization = OrganizationFactory.create() + stripe_customer = StripeCustomerFactory.create() + OrganizationStripeCustomerFactory.create( + organization=organization, customer=stripe_customer + ) + subscription = StripeSubscriptionFactory.create(customer=stripe_customer) + OrganizationStripeSubscriptionFactory.create( + organization=organization, subscription=subscription + ) + + subscription_service.delete_subscription(subscription.id) + + assert 
subscription_service.get_subscription(subscription.id) is None
+        assert not (
+            (
+                db_request.db.query(OrganizationStripeSubscription)
+                .filter_by(subscription=subscription)
+                .count()
+            )
+        )
+
+    def test_get_subscriptions_by_customer(self, subscription_service):
+        organization = OrganizationFactory.create()
+        stripe_customer = StripeCustomerFactory.create()
+        OrganizationStripeCustomerFactory.create(
+            organization=organization, customer=stripe_customer
+        )
+        subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
+        subscription1 = StripeSubscriptionFactory.create(customer=stripe_customer)
+
+        subscriptions = subscription_service.get_subscriptions_by_customer(
+            stripe_customer.customer_id
+        )
+
+        assert subscription in subscriptions
+        assert subscription1 in subscriptions
+
+    def test_delete_customer(self, subscription_service, db_request):
+        organization = OrganizationFactory.create()
+        stripe_customer = StripeCustomerFactory.create()
+        OrganizationStripeCustomerFactory.create(
+            organization=organization, customer=stripe_customer
+        )
+        subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
+        OrganizationStripeSubscriptionFactory.create(
+            organization=organization, subscription=subscription
+        )
+        subscription1 = StripeSubscriptionFactory.create(customer=stripe_customer)
+        OrganizationStripeSubscriptionFactory.create(
+            organization=organization, subscription=subscription1
+        )
+
+        subscription_service.delete_customer(stripe_customer.customer_id)
+
+        assert subscription_service.get_subscription(subscription.id) is None
+        assert not (
+            (
+                db_request.db.query(OrganizationStripeSubscription)
+                .filter_by(subscription=subscription)
+                .count()
+            )
+        )
+        assert subscription_service.get_subscription(subscription1.id) is None
+        assert not (
+            (
+                db_request.db.query(OrganizationStripeSubscription)
+                .filter_by(subscription=subscription1)
+                .count()
+            )
+        )
+        # The organization's link to the Stripe customer is removed as well.
+        assert not (
+            (
+                db_request.db.query(OrganizationStripeCustomer)
+                .filter_by(organization=organization)
+                .count()
+            )
+        )
+
+    def test_update_customer_email(self, subscription_service, db_request):
+        organization = OrganizationFactory.create()
+        stripe_customer = StripeCustomerFactory.create()
+        OrganizationStripeCustomerFactory.create(
+            organization=organization, customer=stripe_customer
+        )
+
+        subscription_service.update_customer_email(
+            stripe_customer.customer_id,
+            billing_email="[email protected]",
+        )
+
+        assert stripe_customer.billing_email == "[email protected]"
+
+    def test_get_subscription_products(self, subscription_service):
+        subscription_product = StripeSubscriptionProductFactory.create()
+        subscription_product_deux = StripeSubscriptionProductFactory.create()
+        subscription_products = subscription_service.get_subscription_products()
+
+        assert subscription_product in subscription_products
+        assert subscription_product_deux in subscription_products
+
+    def test_find_subscription_productid_nonexistent_prod(self, subscription_service):
+        assert subscription_service.find_subscription_productid("can't_see_me") is None
+
+    def test_find_subscription_productid(self, subscription_service):
+        subscription_product = StripeSubscriptionProductFactory.create()
+        assert (
+            subscription_service.find_subscription_productid(
+                subscription_product.product_name
+            )
+            == subscription_product.id
+        )
+        assert (
+            subscription_service.find_subscription_productid(
+                subscription_product.product_id
+            )
+            == subscription_product.id
+        )
+
+    def test_add_subscription_product(self, 
subscription_service): + subscription_product = StripeSubscriptionProductFactory.create() + + new_subscription_product = subscription_service.add_subscription_product( + product_name=subscription_product.product_name, + description=subscription_product.description, + product_id=subscription_product.product_id, + tax_code=subscription_product.tax_code, + ) + subscription_service.db.flush() + product_from_db = subscription_service.get_subscription_product( + new_subscription_product.id + ) + + assert product_from_db.product_name == subscription_product.product_name + assert product_from_db.description == subscription_product.description + assert product_from_db.product_id == subscription_product.product_id + assert product_from_db.tax_code == subscription_product.tax_code + assert product_from_db.is_active + + def test_update_subscription_product(self, subscription_service, db_request): + subscription_product = StripeSubscriptionProductFactory.create( + product_name="original_name" + ) + + subscription_service.update_subscription_product( + subscription_product.id, + product_name="updated_product_name", + ) + + db_subscription_product = subscription_service.get_subscription_product( + subscription_product.id + ) + + assert db_subscription_product.product_name == "updated_product_name" + + def test_delete_subscription_product(self, subscription_service): + subscription_product = StripeSubscriptionProductFactory.create() + + subscription_service.delete_subscription_product(subscription_product.id) + + assert ( + subscription_service.get_subscription_product(subscription_product.id) + is None + ) + + def test_get_subscription_prices(self, subscription_service): + subscription_price = StripeSubscriptionPriceFactory.create() + subscription_price_deux = StripeSubscriptionPriceFactory.create() + subscription_prices = subscription_service.get_subscription_prices() + + assert subscription_price in subscription_prices + assert subscription_price_deux in subscription_prices + + def test_find_subscriptionid_nonexistent_price(self, subscription_service): + assert subscription_service.find_subscription_priceid("john_cena") is None + + def test_add_subscription_price(self, subscription_service, db_request): + subscription_product = StripeSubscriptionProductFactory.create() + + subscription_service.add_subscription_price( + "price_321", + "usd", + subscription_product.id, + 1500, + StripeSubscriptionPriceInterval.Month.value, + "taxerrific", + ) + + subscription_price_id = subscription_service.find_subscription_priceid( + "price_321" + ) + subscription_price = subscription_service.get_subscription_price( + subscription_price_id + ) + + assert subscription_price.is_active + assert subscription_price.price_id == "price_321" + assert subscription_price.currency == "usd" + assert subscription_price.subscription_product_id == subscription_product.id + assert subscription_price.unit_amount == 1500 + assert ( + subscription_price.recurring == StripeSubscriptionPriceInterval.Month.value + ) + assert subscription_price.tax_behavior == "taxerrific" + + def test_update_subscription_price(self, subscription_service, db_request): + subscription_price = StripeSubscriptionPriceFactory.create() + + assert subscription_price.price_id == "price_123" + assert ( + subscription_price.recurring == StripeSubscriptionPriceInterval.Month.value + ) + + subscription_service.update_subscription_price( + subscription_price.id, + price_id="price_321", + recurring=StripeSubscriptionPriceInterval.Year.value, + ) + + assert 
subscription_price.price_id == "price_321" + assert ( + subscription_price.recurring == StripeSubscriptionPriceInterval.Year.value + ) + + db_subscription_price = subscription_service.get_subscription_price( + subscription_price.id + ) + assert db_subscription_price.price_id == "price_321" + assert ( + db_subscription_price.recurring + == StripeSubscriptionPriceInterval.Year.value + ) + + def test_delete_subscription_price(self, subscription_service, db_request): + """ + Delete a subscription price + """ + subscription_price = StripeSubscriptionPriceFactory.create() + + assert db_request.db.query(StripeSubscriptionPrice).get(subscription_price.id) + + subscription_service.delete_subscription_price(subscription_price.id) + + assert not ( + db_request.db.query(StripeSubscriptionPrice).get(subscription_price.id) + ) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -355,6 +355,7 @@ def __init__(self): pretend.call(".malware"), pretend.call(".manage"), pretend.call(".organizations"), + pretend.call(".subscriptions"), pretend.call(".packaging"), pretend.call(".redirects"), pretend.call(".routes"), diff --git a/tests/unit/test_csp.py b/tests/unit/test_csp.py --- a/tests/unit/test_csp.py +++ b/tests/unit/test_csp.py @@ -210,7 +210,7 @@ def test_includeme(): ], "default-src": ["'none'"], "font-src": ["'self'", "fonts.gstatic.com"], - "form-action": ["'self'"], + "form-action": ["'self'", "https://checkout.stripe.com"], "frame-ancestors": ["'none'"], "frame-src": ["'none'"], "img-src": [ diff --git a/tests/unit/test_predicates.py b/tests/unit/test_predicates.py --- a/tests/unit/test_predicates.py +++ b/tests/unit/test_predicates.py @@ -15,7 +15,21 @@ from pyramid.exceptions import ConfigurationError -from warehouse.predicates import DomainPredicate, HeadersPredicate, includeme +from warehouse.organizations.models import OrganizationType +from warehouse.predicates import ( + ActiveOrganizationPredicate, + DomainPredicate, + HeadersPredicate, + includeme, +) +from warehouse.subscriptions.models import StripeSubscriptionStatus + +from ..common.db.organizations import ( + OrganizationFactory, + OrganizationStripeCustomerFactory, + OrganizationStripeSubscriptionFactory, +) +from ..common.db.subscriptions import StripeSubscriptionFactory class TestDomainPredicate: @@ -75,6 +89,89 @@ def test_invalid_value(self, value): assert not predicate(None, pretend.stub(headers={"Foo": "a", "Bar": "baz"})) +class TestActiveOrganizationPredicate: + @pytest.fixture + def organization(self): + organization = OrganizationFactory( + orgtype=OrganizationType.Company, + ) + OrganizationStripeCustomerFactory( + organization=organization, + stripe_customer_id="mock-customer-id", + ) + return organization + + @pytest.fixture + def active_subscription(self, organization): + subscription = StripeSubscriptionFactory( + stripe_customer_id=organization.customer.customer_id, + status=StripeSubscriptionStatus.Active, + ) + OrganizationStripeSubscriptionFactory( + organization=organization, + subscription=subscription, + ) + return subscription + + @pytest.fixture + def inactive_subscription(self, organization): + subscription = StripeSubscriptionFactory( + stripe_customer_id=organization.customer.customer_id, + status=StripeSubscriptionStatus.PastDue, + ) + OrganizationStripeSubscriptionFactory( + organization=organization, + subscription=subscription, + ) + return subscription + + @pytest.mark.parametrize( + ("value", "expected"), + [ + (True, 
"require_active_organization = True"), + (False, "require_active_organization = False"), + ], + ) + def test_text(self, value, expected): + predicate = ActiveOrganizationPredicate(value, None) + assert predicate.text() == expected + assert predicate.phash() == expected + + def test_disable_predicate(self, db_request, organization): + predicate = ActiveOrganizationPredicate(False, None) + assert predicate(organization, db_request) + + def test_disable_organizations(self, db_request, organization): + predicate = ActiveOrganizationPredicate(True, None) + assert not predicate(organization, db_request) + + def test_inactive_organization( + self, + db_request, + organization, + enable_organizations, + ): + organization.is_active = False + predicate = ActiveOrganizationPredicate(True, None) + assert not predicate(organization, db_request) + + def test_inactive_subscription( + self, + db_request, + organization, + enable_organizations, + inactive_subscription, + ): + predicate = ActiveOrganizationPredicate(True, None) + assert not predicate(organization, db_request) + + def test_active_subscription( + self, db_request, organization, enable_organizations, active_subscription + ): + predicate = ActiveOrganizationPredicate(True, None) + assert predicate(organization, db_request) + + def test_includeme(): config = pretend.stub( add_route_predicate=pretend.call_recorder(lambda name, pred: None), @@ -85,5 +182,6 @@ def test_includeme(): assert config.add_route_predicate.calls == [pretend.call("domain", DomainPredicate)] assert config.add_view_predicate.calls == [ - pretend.call("require_headers", HeadersPredicate) + pretend.call("require_headers", HeadersPredicate), + pretend.call("require_active_organization", ActiveOrganizationPredicate), ] diff --git a/tests/unit/test_routes.py b/tests/unit/test_routes.py --- a/tests/unit/test_routes.py +++ b/tests/unit/test_routes.py @@ -255,6 +255,20 @@ def add_policy(name, filename): traverse="/{organization_name}", domain=warehouse, ), + pretend.call( + "manage.organization.activate_subscription", + "/manage/organization/{organization_name}/subscription/activate/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ), + pretend.call( + "manage.organization.subscription", + "/manage/organization/{organization_name}/subscription/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ), pretend.call( "manage.organization.projects", "/manage/organization/{organization_name}/projects/", @@ -473,6 +487,7 @@ def add_policy(name, filename): "/_/vulnerabilities/osv/report", domain=warehouse, ), + pretend.call("api.billing.webhook", "/billing/webhook/", domain=warehouse), pretend.call("api.simple.index", "/simple/", domain=warehouse), pretend.call( "api.simple.detail", @@ -481,6 +496,28 @@ def add_policy(name, filename): traverse="/{name}/", domain=warehouse, ), + # Mock URLs + pretend.call( + "mock.billing.checkout-session", + "/mock/billing/{organization_name}/checkout/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ), + pretend.call( + "mock.billing.portal-session", + "/mock/billing/{organization_name}/portal/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ), + pretend.call( + "mock.billing.trigger-checkout-session-completed", + "/mock/billing/{organization_name}/checkout/completed/", 
+ factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ), pretend.call( "legacy.api.json.project", "/pypi/{name}/json",
Remove organization if it is not approved by the admin

1. If an organization is declined by the admin, the org appears in the 'Your organizations' list.
2. Even if the billing didn't confirm, the organization creation request goes through to the admin. If the request was declined, the organization appears on the 'Your organizations' list with 'Activate Billing'.

Any organization creation that was declined should be removed from the 'Your organizations' list.

To recreate the steps:

1. Create an organization
2. Admin should decline the request

![test13](https://user-images.githubusercontent.com/37237726/183937282-51a72e01-3885-433c-8e70-06af89e8466d.png)
2022-07-29T11:10:45Z
[]
[]
pypi/warehouse
12,008
pypi__warehouse-12008
[ "11776" ]
00414b555bc230936f629a1499024e9ea27ee793
diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py --- a/warehouse/manage/forms.py +++ b/warehouse/manage/forms.py @@ -97,6 +97,7 @@ class CreateInternalRoleForm( team_name = wtforms.SelectField( "Select team", choices=[("", "Select team")], + default="", # Set default to avoid error when there are no team choices. validators=[wtforms.validators.InputRequired()], ) @@ -448,6 +449,7 @@ class AddOrganizationProjectForm(forms.Form): existing_project_name = wtforms.SelectField( "Select project", choices=[("", "Select project")], + default="", # Set default to avoid error when there are no project choices. ) new_project_name = wtforms.StringField() diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -1655,7 +1655,7 @@ def manage_organization_projects(self): return self.default_response - @view_config(request_method="POST") + @view_config(request_method="POST", permission="add:project") def add_organization_project(self): if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): raise HTTPNotFound @@ -2613,14 +2613,20 @@ def __init__(self, project, request): @view_config(request_method="GET") def manage_project_settings(self): if self.request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS): + # Disable transfer of project to any organization. organization_choices = set() else: + # Allow transfer of project to organizations owned or managed by user. all_user_organizations = user_organizations(self.request) organizations_owned = set( organization.name for organization in all_user_organizations["organizations_owned"] ) - organization_choices = organizations_owned - ( + organizations_managed = set( + organization.name + for organization in all_user_organizations["organizations_managed"] + ) + organization_choices = (organizations_owned | organizations_managed) - ( {self.project.organization.name} if self.project.organization else set() ) @@ -2944,6 +2950,23 @@ def remove_organization_project(project, request): request.route_path("manage.project.settings", project_name=project.name) ) + if ( + # Check that user has permission to remove projects from organization. + (project.organization and request.user not in project.organization.owners) + # Check that project has an individual owner. + or not project_owners(request, project) + ): + request.session.flash( + ( + "Could not remove project from organization - " + "you do not have the required permissions" + ), + queue="error", + ) + return HTTPSeeOther( + request.route_path("manage.project.settings", project_name=project.name) + ) + confirm_project( project, request, @@ -2952,14 +2975,6 @@ def remove_organization_project(project, request): error_message="Could not remove project from organization", ) - if not project_owners(request, project): - request.session.flash( - "Could not remove project from organization", queue="error" - ) - return HTTPSeeOther( - request.route_path("manage.project.settings", project_name=project.name) - ) - # Remove project from current organization. organization_service = request.find_service(IOrganizationService, context=None) if organization := project.organization: @@ -3018,6 +3033,16 @@ def transfer_organization_project(project, request): request.route_path("manage.project.settings", project_name=project.name) ) + # Check that user has permission to remove projects from organization. 
+ if project.organization and request.user not in project.organization.owners: + request.session.flash( + "Could not transfer project - you do not have the required permissions", + queue="error", + ) + return HTTPSeeOther( + request.route_path("manage.project.settings", project_name=project.name) + ) + confirm_project( project, request, @@ -3031,7 +3056,11 @@ def transfer_organization_project(project, request): organization.name for organization in all_user_organizations["organizations_owned"] ) - organization_choices = organizations_owned - ( + organizations_managed = set( + organization.name + for organization in all_user_organizations["organizations_managed"] + ) + organization_choices = (organizations_owned | organizations_managed) - ( {project.organization.name} if project.organization else set() ) diff --git a/warehouse/organizations/models.py b/warehouse/organizations/models.py --- a/warehouse/organizations/models.py +++ b/warehouse/organizations/models.py @@ -246,6 +246,16 @@ def __acl__(self): # Allow all people in organization read access. # Allow write access depending on role. if role.role_name == OrganizationRoleType.Owner: + # Allowed: + # - View organization ("view:organization") + # - View team ("view:team") + # - Invite/remove organization member ("manage:organization") + # - Add/remove team member ("manage:team") + # - Manage billing ("manage:billing") + # - Create project ("add:project") + # - Delete project ("remove:project") + # Disallowed: + # - (none) acls.append( ( Allow, @@ -255,10 +265,22 @@ def __acl__(self): "view:team", "manage:organization", "manage:team", + "manage:billing", + "add:project", + "remove:project", ], ) ) elif role.role_name == OrganizationRoleType.BillingManager: + # Allowed: + # - View organization ("view:organization") + # - View team ("view:team") + # - Manage billing ("manage:billing") + # Disallowed: + # - Invite/remove organization member ("manage:organization") + # - Add/remove team member ("manage:team") + # - Create project ("add:project") + # - Delete project ("remove:project") acls.append( ( Allow, @@ -267,15 +289,39 @@ def __acl__(self): ) ) elif role.role_name == OrganizationRoleType.Manager: + # Allowed: + # - View organization ("view:organization") + # - View team ("view:team") + # - Add/remove team member ("manage:team") + # - Create project ("add:project") + # Disallowed: + # - Invite/remove organization member ("manage:organization") + # - Manage billing ("manage:billing") + # - Delete project ("remove:project") acls.append( ( Allow, f"user:{role.user.id}", - ["view:organization", "view:team", "manage:team"], + [ + "view:organization", + "view:team", + "manage:team", + "add:project", + ], ) ) else: # No member-specific write access needed for now. + + # Allowed: + # - View organization ("view:organization") + # - View team ("view:team") + # Disallowed: + # - Invite/remove organization member ("manage:organization") + # - Add/remove team member ("manage:team") + # - Manage billing ("manage:billing") + # - Create project ("add:project") + # - Delete project ("remove:project") acls.append( (Allow, f"user:{role.user.id}", ["view:organization", "view:team"]) )
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -5365,9 +5365,15 @@ def test_toggle_2fa_requirement_non_critical( assert event.additional == {"modified_by": db_request.user.username} def test_remove_organization_project_no_confirm(self): - project = pretend.stub(normalized_name="foo") + user = pretend.stub() + project = pretend.stub( + normalized_name="foo", + organization=pretend.stub(owners=[user]), + owners=[user], + ) request = pretend.stub( POST={}, + user=user, flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), route_path=lambda *a, **kw: "/foo/bar/", @@ -5386,9 +5392,15 @@ def test_remove_organization_project_no_confirm(self): ] def test_remove_organization_project_wrong_confirm(self): - project = pretend.stub(normalized_name="foo") + user = pretend.stub() + project = pretend.stub( + normalized_name="foo", + organization=pretend.stub(owners=[user]), + owners=[user], + ) request = pretend.stub( POST={"confirm_remove_organization_project_name": "bar"}, + user=user, flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), route_path=lambda *a, **kw: "/foo/bar/", @@ -5472,6 +5484,39 @@ def test_remove_organization_project_no_current_organization( assert result.headers["Location"] == "/the-redirect" assert send_organization_project_removed_email.calls == [] + def test_remove_organization_project_not_organization_owner(self): + user = pretend.stub() + project = pretend.stub( + name="foo", + normalized_name="foo", + organization=pretend.stub(owners=[]), + owners=[user], + ) + request = pretend.stub( + POST={}, + user=user, + flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), + session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), + route_path=lambda *a, **kw: "/foo/bar/", + ) + + result = views.remove_organization_project(project, request) + + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/foo/bar/" + assert request.flags.enabled.calls == [ + pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS) + ] + assert request.session.flash.calls == [ + pretend.call( + ( + "Could not remove project from organization - " + "you do not have the required permissions" + ), + queue="error", + ) + ] + def test_remove_organization_project_no_individual_owner( self, monkeypatch, db_request ): @@ -5504,7 +5549,13 @@ def test_remove_organization_project_no_individual_owner( pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS) ] assert db_request.session.flash.calls == [ - pretend.call("Could not remove project from organization", queue="error") + pretend.call( + ( + "Could not remove project from organization - " + "you do not have the required permissions" + ), + queue="error", + ) ] assert db_request.route_path.calls == [ pretend.call("manage.project.settings", project_name="foo") @@ -5562,9 +5613,14 @@ def test_remove_organization_project(self, monkeypatch, db_request): ] def test_transfer_organization_project_no_confirm(self): - project = pretend.stub(normalized_name="foo") + user = pretend.stub() + project = pretend.stub( + normalized_name="foo", + organization=pretend.stub(owners=[user]), + ) request = pretend.stub( POST={}, + user=user, flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), 
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), route_path=lambda *a, **kw: "/foo/bar/", @@ -5583,9 +5639,14 @@ def test_transfer_organization_project_no_confirm(self): ] def test_transfer_organization_project_wrong_confirm(self): - project = pretend.stub(normalized_name="foo") + user = pretend.stub() + project = pretend.stub( + normalized_name="foo", + organization=pretend.stub(owners=[user]), + ) request = pretend.stub( POST={"confirm_transfer_organization_project_name": "bar"}, + user=user, flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), route_path=lambda *a, **kw: "/foo/bar/", @@ -5692,6 +5753,38 @@ def test_transfer_organization_project_no_current_organization( ) ] + def test_transfer_organization_project_not_organization_owner(self): + user = pretend.stub() + project = pretend.stub( + name="foo", + normalized_name="foo", + organization=pretend.stub(owners=[]), + ) + request = pretend.stub( + POST={}, + user=user, + flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), + session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), + route_path=lambda *a, **kw: "/foo/bar/", + ) + + result = views.transfer_organization_project(project, request) + + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/foo/bar/" + assert request.flags.enabled.calls == [ + pretend.call(AdminFlagValue.DISABLE_ORGANIZATIONS) + ] + assert request.session.flash.calls == [ + pretend.call( + ( + "Could not transfer project - " + "you do not have the required permissions" + ), + queue="error", + ) + ] + def test_transfer_organization_project_no_individual_owner( self, monkeypatch, db_request ): diff --git a/tests/unit/organizations/test_models.py b/tests/unit/organizations/test_models.py --- a/tests/unit/organizations/test_models.py +++ b/tests/unit/organizations/test_models.py @@ -108,6 +108,9 @@ def test_acl(self, db_session): "view:team", "manage:organization", "manage:team", + "manage:billing", + "add:project", + "remove:project", ], ), ( @@ -118,6 +121,9 @@ def test_acl(self, db_session): "view:team", "manage:organization", "manage:team", + "manage:billing", + "add:project", + "remove:project", ], ), ], @@ -141,12 +147,12 @@ def test_acl(self, db_session): ( Allow, f"user:{account_mgr1.user.id}", - ["view:organization", "view:team", "manage:team"], + ["view:organization", "view:team", "manage:team", "add:project"], ), ( Allow, f"user:{account_mgr2.user.id}", - ["view:organization", "view:team", "manage:team"], + ["view:organization", "view:team", "manage:team", "add:project"], ), ], key=lambda x: x[1], @@ -228,6 +234,9 @@ def test_acl(self, db_session): "view:team", "manage:organization", "manage:team", + "manage:billing", + "add:project", + "remove:project", ], ), ( @@ -238,6 +247,9 @@ def test_acl(self, db_session): "view:team", "manage:organization", "manage:team", + "manage:billing", + "add:project", + "remove:project", ], ), ], @@ -261,12 +273,12 @@ def test_acl(self, db_session): ( Allow, f"user:{account_mgr1.user.id}", - ["view:organization", "view:team", "manage:team"], + ["view:organization", "view:team", "manage:team", "add:project"], ), ( Allow, f"user:{account_mgr2.user.id}", - ["view:organization", "view:team", "manage:team"], + ["view:organization", "view:team", "manage:team", "add:project"], ), ], key=lambda x: x[1],
Cannot create a project in an organization account

For an owner who does not have any prior projects on PyPI, creating a project in an organization does not work. To recreate, follow these steps:

1. Navigate to 'Your organizations'
2. Select a specific organization and click on 'Manage'
3. Click on 'Projects'
4. Select 'New Project'
5. Enter the name of the project
6. Click on 'Create and add new project'

The new project is not added to the organization.

Note: This feature does work when the user has prior projects.

![project1](https://user-images.githubusercontent.com/37237726/177815345-023aeadc-94a8-46a0-8e37-a8cf54d2bd70.png)
Priority 3/3
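The merged patch above expresses these per-role rules as Pyramid ACL entries on the organization model. A minimal sketch of the pattern, with the role names and permission strings abbreviated from the diff (not the full warehouse implementation, which also covers Billing Managers):

```python
# Sketch of the per-role ACL pattern from the patch above (abbreviated).
# In Pyramid, __acl__ returns (action, principal, permissions) triples.
from pyramid.security import Allow


def __acl__(self):
    acls = []
    for role in self.roles:
        if role.role_name == OrganizationRoleType.Owner:
            # Owners: everything, including billing and project removal.
            perms = [
                "view:organization", "view:team",
                "manage:organization", "manage:team",
                "manage:billing", "add:project", "remove:project",
            ]
        elif role.role_name == OrganizationRoleType.Manager:
            # Managers: run teams and create projects, but not delete them.
            perms = ["view:organization", "view:team", "manage:team", "add:project"]
        else:
            # Members (and roles omitted from this sketch): read-only.
            perms = ["view:organization", "view:team"]
        acls.append((Allow, f"user:{role.user.id}", perms))
    return acls
```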
2022-08-02T13:50:44Z
[]
[]
pypi/warehouse
12,018
pypi__warehouse-12018
[ "11990" ]
c95be4a1055f4b36a8852715eb80318c81fc00ca
diff --git a/warehouse/config.py b/warehouse/config.py --- a/warehouse/config.py +++ b/warehouse/config.py @@ -421,6 +421,7 @@ def configure(settings=None): filters.setdefault("localize_datetime", "warehouse.filters:localize_datetime") filters.setdefault("is_recent", "warehouse.filters:is_recent") filters.setdefault("canonicalize_name", "packaging.utils:canonicalize_name") + filters.setdefault("format_author_email", "warehouse.filters:format_author_email") # We also want to register some global functions for Jinja jglobals = config.get_settings().setdefault("jinja2.globals", {}) diff --git a/warehouse/filters.py b/warehouse/filters.py --- a/warehouse/filters.py +++ b/warehouse/filters.py @@ -19,6 +19,8 @@ import re import urllib.parse +from email.utils import getaddresses + import html5lib import html5lib.serializer import html5lib.treewalkers @@ -163,5 +165,19 @@ def is_recent(timestamp): return False +def format_author_email(metadata_email: str) -> tuple[str, str]: + """ + Return the name and email address from a metadata RFC-822 string. + Use Jinja's `first` and `last` to access each part in a template. + TODO: Support more than one email address, per RFC-822. + """ + author_emails = [] + for author_name, author_email in getaddresses([metadata_email]): + if "@" not in author_email: + return author_name, "" + author_emails.append((author_name, author_email)) + return author_emails[0][0], author_emails[0][1] + + def includeme(config): config.add_request_method(_camo_url, name="camo_url")
diff --git a/tests/unit/test_filters.py b/tests/unit/test_filters.py --- a/tests/unit/test_filters.py +++ b/tests/unit/test_filters.py @@ -227,3 +227,17 @@ def test_is_recent(delta, expected): def test_is_recent_none(): assert filters.is_recent(None) is False + + [email protected]( + ("meta_email", "expected_name", "expected_email"), + [ + ("not-an-email-address", "", ""), + ("[email protected]", "", "[email protected]"), + ('"Foo Bar" <[email protected]>', "Foo Bar", "[email protected]"), + ], +) +def test_format_author_email(meta_email, expected_name, expected_email): + author_name, author_email = filters.format_author_email(meta_email) + assert author_name == expected_name + assert author_email == expected_email
Author from pyproject.toml is displayed incorrectly

**Describe the bug**

I made a new project with a pyproject.toml. My pyproject.toml contains author information in this form:

```toml
authors = [
  { name="Example Author", email="[email protected]" },
]
```

This is the right form [according to the official packaging guide](https://packaging.python.org/en/latest/tutorials/packaging-projects/#configuring-metadata), but the author is displayed wrong on PyPI.

**Expected behavior**

![grafik](https://user-images.githubusercontent.com/15185051/181994557-48b349bd-38f8-4384-9eef-64b0db51a7e4.png)

**To Reproduce**

Visit my [new project](https://pypi.org/project/desktop-entry-lib/). You will see this:

![grafik](https://user-images.githubusercontent.com/15185051/181994587-eb2f92ea-d6de-44c8-9e9d-11808546243e.png)

The broken author string is also in the mailto link, which leads to an invalid mailto.

**My Platform**

Any platform

**Additional context**

I first thought this was a bug in the build module, so I [reported it there](https://github.com/pypa/build/issues/501). But according to the PEPs, the behaviour of the build module is correct and the bug is on the side of PyPI.
This is a funny case: [RFC 2368](https://www.rfc-editor.org/rfc/rfc2368) allows `mailto:` to contain "mailbox"-style addresses (like `Foo Bar <[email protected]>`), while [RFC 6068](https://datatracker.ietf.org/doc/html/rfc6068) (which supersedes 2368) only allows the address-specification form. PyPI should arguably follow the latest RFC, but it seems like support for the older one is still common (for example, macOS's `Mail.app` has no problems with the `mailto:Foo Bar <[email protected]>` format). So, arguably, the `mailto:` URL itself is not invalid.

We _could_ probably render the author without their email in angle brackets, but there might be additional data quality issues (e.g. authors with emails only, no names) that make that annoying.

I think today warehouse is echoing the input from the metadata only, nothing more; it's stored as an unstructured text column. I'm exploring a custom Jinja filter that would format it correctly on display.
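The filter in the merged patch leans on the standard library's `email.utils.getaddresses` to split an RFC 822 author string into (name, email) pairs. A quick illustration of its behavior, mirroring the cases in the test patch (the addresses here are placeholders):

```python
# How email.utils.getaddresses splits RFC 822 author strings.
from email.utils import getaddresses

print(getaddresses(['"Foo Bar" <foo@example.com>']))
# -> [('Foo Bar', 'foo@example.com')]

print(getaddresses(["foo@example.com"]))
# -> [('', 'foo@example.com')]

print(getaddresses(["not-an-email-address"]))
# -> [('', 'not-an-email-address')]  # no '@': the patch returns an empty email
```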
2022-08-04T12:19:43Z
[]
[]
pypi/warehouse
12,022
pypi__warehouse-12022
[ "11778" ]
eb6f95b46dbb231a024406b94803fed135cd224e
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -330,6 +330,7 @@ def save_account(self): for email in self.request.user.emails: email.public = email.email == public_email self.request.session.flash("Account details updated", queue="success") + return HTTPSeeOther(self.request.path) return {**self.default_response, "save_account_form": form} @@ -363,7 +364,7 @@ def add_email(self): ), queue="success", ) - return self.default_response + return HTTPSeeOther(self.request.path) return {**self.default_response, "add_email_form": form} @@ -398,6 +399,8 @@ def delete_email(self): self.request.session.flash( f"Email address {email.email} removed", queue="success" ) + return HTTPSeeOther(self.request.path) + return self.default_response @view_config( @@ -443,7 +446,8 @@ def change_primary_email(self): send_primary_email_change_email( self.request, (self.request.user, previous_primary_email) ) - return self.default_response + + return HTTPSeeOther(self.request.path) @view_config(request_method="POST", request_param=["reverify_email_id"]) def reverify_email(self): @@ -474,7 +478,7 @@ def reverify_email(self): f"Verification email for {email.email} resent", queue="success" ) - return self.default_response + return HTTPSeeOther(self.request.path) @view_config(request_method="POST", request_param=ChangePasswordForm.__params__) def change_password(self): @@ -504,6 +508,7 @@ def change_password(self): self.user_service.get_password_timestamp(self.request.user.id) ) self.request.session.flash("Password updated", queue="success") + return HTTPSeeOther(self.request.path) return {**self.default_response, "change_password_form": form} @@ -1040,6 +1045,7 @@ def create_macaroon(self): }, ) + # This is an exception to our pattern of redirecting POST to GET. response.update(serialized_macaroon=serialized_macaroon, macaroon=macaroon) return {**response, "create_macaroon_form": form} @@ -1322,7 +1328,7 @@ def create_organization(self): else: return {"create_organization_form": form} - return self.default_response + return HTTPSeeOther(self.request.path) @view_defaults( @@ -1385,6 +1391,7 @@ def save_organization(self): data = form.data self.organization_service.update_organization(self.organization.id, **data) self.request.session.flash("Organization details updated", queue="success") + return HTTPSeeOther(self.request.path) return {**self.default_response, "save_organization_form": form} @@ -1892,11 +1899,7 @@ def manage_organization_roles( queue="success", ) - form = _form_class( - orgtype=organization.orgtype, - organization_service=organization_service, - user_service=user_service, - ) + return HTTPSeeOther(request.path) roles = set(organization_service.get_organization_roles(organization.id)) invitations = set(organization_service.get_organization_invites(organization.id)) @@ -2867,6 +2870,8 @@ def add_github_oidc_provider(self): "warehouse.oidc.add_provider.ok", tags=["provider:GitHub"] ) + return HTTPSeeOther(self.request.path) + return response @view_config(request_method="POST", request_param=DeleteProviderForm.__params__) @@ -2931,6 +2936,8 @@ def delete_oidc_provider(self): tags=[f"provider:{provider.provider_name}"], ) + return HTTPSeeOther(self.request.path) + return self.default_response @@ -3889,7 +3896,9 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): ), queue="error", ) - return default_response + + # Refresh project collaborators. 
+ return HTTPSeeOther(request.path) if enable_internal_collaborator and user in internal_users: @@ -3987,7 +3996,6 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): ), queue="error", ) - return default_response elif ( user_invite and user_invite.invite_status == RoleInvitationStatus.Pending @@ -4001,7 +4009,6 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): ), queue="error", ) - return default_response else: invite_token = token_service.dumps( { @@ -4069,8 +4076,8 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): queue="success", ) - # Refresh project collaborators. - return HTTPSeeOther(request.path) + # Refresh project collaborators. + return HTTPSeeOther(request.path) @view_config(
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -213,6 +213,7 @@ def test_save_account(self, monkeypatch): ), session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), find_service=lambda *a, **kw: user_service, + path="request-path", ) save_account_obj = pretend.stub(validate=lambda: True, data=request.POST) monkeypatch.setattr(views, "SaveAccountForm", lambda *a, **kw: save_account_obj) @@ -221,10 +222,7 @@ def test_save_account(self, monkeypatch): ) view = views.ManageAccountViews(request) - assert view.save_account() == { - **view.default_response, - "save_account_form": save_account_obj, - } + assert isinstance(view.save_account(), HTTPSeeOther) assert request.session.flash.calls == [ pretend.call("Account details updated", queue="success") ] @@ -286,7 +284,7 @@ def test_add_email(self, monkeypatch, pyramid_request): ) view = views.ManageAccountViews(pyramid_request) - assert view.add_email() == view.default_response + assert isinstance(view.add_email(), HTTPSeeOther) assert user_service.add_email.calls == [ pretend.call(pyramid_request.user.id, email_address) ] @@ -359,13 +357,14 @@ def test_delete_email(self, monkeypatch): find_service=lambda *a, **kw: user_service, session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), remote_addr="0.0.0.0", + path="request-path", ) monkeypatch.setattr( views.ManageAccountViews, "default_response", {"_": pretend.stub()} ) view = views.ManageAccountViews(request) - assert view.delete_email() == view.default_response + assert isinstance(view.delete_email(), HTTPSeeOther) assert request.session.flash.calls == [ pretend.call(f"Email address {email.email} removed", queue="success") ] @@ -451,7 +450,8 @@ def test_change_primary_email(self, monkeypatch, db_request): send_email = pretend.call_recorder(lambda *a: None) monkeypatch.setattr(views, "send_primary_email_change_email", send_email) - assert view.change_primary_email() == view.default_response + + assert isinstance(view.change_primary_email(), HTTPSeeOther) assert send_email.calls == [ pretend.call(db_request, (db_request.user, old_primary)) ] @@ -489,7 +489,8 @@ def test_change_primary_email_without_current(self, monkeypatch, db_request): send_email = pretend.call_recorder(lambda *a: None) monkeypatch.setattr(views, "send_primary_email_change_email", send_email) - assert view.change_primary_email() == view.default_response + + assert isinstance(view.change_primary_email(), HTTPSeeOther) assert send_email.calls == [] assert db_request.session.flash.calls == [ pretend.call( @@ -545,6 +546,7 @@ def test_reverify_email(self, monkeypatch): find_service=lambda *a, **kw: pretend.stub(), user=pretend.stub(id=pretend.stub(), username="username", name="Name"), remote_addr="0.0.0.0", + path="request-path", ) send_email = pretend.call_recorder(lambda *a: None) monkeypatch.setattr(views, "send_email_verification_email", send_email) @@ -553,7 +555,7 @@ def test_reverify_email(self, monkeypatch): ) view = views.ManageAccountViews(request) - assert view.reverify_email() == view.default_response + assert isinstance(view.reverify_email(), HTTPSeeOther) assert request.session.flash.calls == [ pretend.call("Verification email for email_address resent", queue="success") ] @@ -607,6 +609,7 @@ def test_reverify_email_already_verified(self, monkeypatch): session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), find_service=lambda *a, **kw: pretend.stub(), 
user=pretend.stub(id=pretend.stub()), + path="request-path", ) send_email = pretend.call_recorder(lambda *a: None) monkeypatch.setattr(views, "send_email_verification_email", send_email) @@ -615,7 +618,7 @@ def test_reverify_email_already_verified(self, monkeypatch): ) view = views.ManageAccountViews(request) - assert view.reverify_email() == view.default_response + assert isinstance(view.reverify_email(), HTTPSeeOther) assert request.session.flash.calls == [ pretend.call("Email is already verified", queue="error") ] @@ -651,6 +654,7 @@ def test_change_password(self, monkeypatch): refresh=lambda obj: None, ), remote_addr="0.0.0.0", + path="request-path", ) change_pwd_obj = pretend.stub( validate=lambda: True, new_password=pretend.stub(data=new_password) @@ -665,10 +669,7 @@ def test_change_password(self, monkeypatch): ) view = views.ManageAccountViews(request) - assert view.change_password() == { - **view.default_response, - "change_password_form": change_pwd_obj, - } + assert isinstance(view.change_password(), HTTPSeeOther) assert request.session.flash.calls == [ pretend.call("Password updated", queue="success") ] @@ -2456,6 +2457,7 @@ def test_create_organization(self, monkeypatch): }[interface], flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), remote_addr="0.0.0.0", + path="request-path", ) create_organization_obj = pretend.stub(validate=lambda: True, data=request.POST) @@ -2556,7 +2558,7 @@ def test_create_organization(self, monkeypatch): organization_name=organization.name, ), ] - assert result == default_response + assert isinstance(result, HTTPSeeOther) def test_create_organization_validation_fails(self, monkeypatch): admins = [] @@ -2711,10 +2713,7 @@ def test_save_organization( view = views.ManageOrganizationSettingsViews(organization, db_request) result = view.save_organization() - assert result == { - **view.default_response, - "save_organization_form": save_organization_obj, - } + assert isinstance(result, HTTPSeeOther) assert organization_service.update_organization.calls == [ pretend.call(organization.id, **db_request.POST) ] @@ -3574,12 +3573,12 @@ def test_post_new_organization_role( EmailFactory.create(user=new_user, verified=True, primary=True) owner_1 = UserFactory.create(username="owner_1") owner_2 = UserFactory.create(username="owner_2") - owner_1_role = OrganizationRoleFactory.create( + OrganizationRoleFactory.create( organization=organization, user=owner_1, role_name=OrganizationRoleType.Owner, ) - owner_2_role = OrganizationRoleFactory.create( + OrganizationRoleFactory.create( organization=organization, user=owner_2, role_name=OrganizationRoleType.Owner, @@ -3612,26 +3611,20 @@ def test_post_new_organization_role( ) result = views.manage_organization_roles(organization, db_request) - form_obj = result["form"] assert db_request.session.flash.calls == [ pretend.call(f"Invitation sent to '{new_user.username}'", queue="success") ] # Only one role invitation is created - organization_invitation = ( + ( db_request.db.query(OrganizationInvitation) .filter(OrganizationInvitation.user == new_user) .filter(OrganizationInvitation.organization == organization) .one() ) - assert result == { - "organization": organization, - "roles": {owner_1_role, owner_2_role}, - "invitations": {organization_invitation}, - "form": form_obj, - } + assert isinstance(result, HTTPSeeOther) assert send_organization_member_invited_email.calls == [ pretend.call( db_request, @@ -3707,11 +3700,6 @@ def test_post_duplicate_organization_role( organization_service=organization_service, 
user_service=user_service, ), - pretend.call( - orgtype=organization.orgtype, - organization_service=organization_service, - user_service=user_service, - ), ] assert db_request.session.flash.calls == [ pretend.call( @@ -3722,12 +3710,7 @@ def test_post_duplicate_organization_role( # No additional roles are created assert role == db_request.db.query(OrganizationRole).one() - assert result == { - "organization": organization, - "roles": {role}, - "invitations": set(), - "form": form_obj, - } + assert isinstance(result, HTTPSeeOther) @pytest.mark.parametrize("with_email", [True, False]) def test_post_unverified_email( @@ -3767,11 +3750,6 @@ def test_post_unverified_email( organization_service=organization_service, user_service=user_service, ), - pretend.call( - orgtype=organization.orgtype, - organization_service=organization_service, - user_service=user_service, - ), ] assert db_request.session.flash.calls == [ pretend.call( @@ -3784,12 +3762,7 @@ def test_post_unverified_email( # No additional roles are created assert db_request.db.query(OrganizationRole).all() == [] - assert result == { - "organization": organization, - "roles": set(), - "invitations": set(), - "form": form_obj, - } + assert isinstance(result, HTTPSeeOther) def test_cannot_reinvite_organization_role( self, db_request, organization_service, user_service, enable_organizations @@ -3799,18 +3772,18 @@ def test_cannot_reinvite_organization_role( EmailFactory.create(user=new_user, verified=True, primary=True) owner_1 = UserFactory.create(username="owner_1") owner_2 = UserFactory.create(username="owner_2") - owner_1_role = OrganizationRoleFactory.create( + OrganizationRoleFactory.create( organization=organization, user=owner_1, role_name=OrganizationRoleType.Owner, ) - owner_2_role = OrganizationRoleFactory.create( + OrganizationRoleFactory.create( organization=organization, user=owner_2, role_name=OrganizationRoleType.Owner, ) token_service = db_request.find_service(ITokenService, name="email") - new_organization_invitation = OrganizationInvitationFactory.create( + OrganizationInvitationFactory.create( organization=organization, user=new_user, invite_status=OrganizationInvitationStatus.Pending, @@ -3843,11 +3816,6 @@ def test_cannot_reinvite_organization_role( organization_service=organization_service, user_service=user_service, ), - pretend.call( - orgtype=organization.orgtype, - organization_service=organization_service, - user_service=user_service, - ), ] assert db_request.session.flash.calls == [ pretend.call( @@ -3855,13 +3823,7 @@ def test_cannot_reinvite_organization_role( queue="error", ) ] - - assert result == { - "organization": organization, - "roles": {owner_1_role, owner_2_role}, - "invitations": {new_organization_invitation}, - "form": form_obj, - } + assert isinstance(result, HTTPSeeOther) @freeze_time(datetime.datetime.utcnow()) def test_reinvite_organization_role_after_expiration( @@ -3877,18 +3839,18 @@ def test_reinvite_organization_role_after_expiration( EmailFactory.create(user=new_user, verified=True, primary=True) owner_1 = UserFactory.create(username="owner_1") owner_2 = UserFactory.create(username="owner_2") - owner_1_role = OrganizationRoleFactory.create( + OrganizationRoleFactory.create( organization=organization, user=owner_1, role_name=OrganizationRoleType.Owner, ) - owner_2_role = OrganizationRoleFactory.create( + OrganizationRoleFactory.create( user=owner_2, organization=organization, role_name=OrganizationRoleType.Owner, ) token_service = db_request.find_service(ITokenService, name="email") - 
new_organization_invitation = OrganizationInvitationFactory.create( + OrganizationInvitationFactory.create( user=new_user, organization=organization, invite_status=OrganizationInvitationStatus.Expired, @@ -3938,31 +3900,20 @@ def test_reinvite_organization_role_after_expiration( organization_service=organization_service, user_service=user_service, ), - pretend.call( - orgtype=organization.orgtype, - organization_service=organization_service, - user_service=user_service, - ), ] assert db_request.session.flash.calls == [ pretend.call(f"Invitation sent to '{new_user.username}'", queue="success") ] # Only one role invitation is created - organization_invitation = ( + ( db_request.db.query(OrganizationInvitation) .filter(OrganizationInvitation.user == new_user) .filter(OrganizationInvitation.organization == organization) .one() ) - assert result["invitations"] == {new_organization_invitation} - assert result == { - "organization": organization, - "roles": {owner_1_role, owner_2_role}, - "invitations": {organization_invitation}, - "form": form_obj, - } + assert isinstance(result, HTTPSeeOther) assert send_organization_member_invited_email.calls == [ pretend.call( db_request, @@ -7416,15 +7367,7 @@ def test_post_duplicate_role(self, db_request): # No additional roles are created assert role == db_request.db.query(Role).one() - assert result == { - "project": project, - "roles": {role}, - "invitations": set(), - "form": form_obj, - "enable_internal_collaborator": False, - "team_project_roles": set(), - "internal_role_form": None, - } + assert isinstance(result, HTTPSeeOther) def test_reinvite_role_after_expiration(self, monkeypatch, db_request): project = ProjectFactory.create(name="foobar") @@ -7576,15 +7519,7 @@ def test_post_unverified_email(self, db_request, with_email): # No additional roles are created assert db_request.db.query(Role).all() == [] - assert result == { - "project": project, - "roles": set(), - "invitations": set(), - "form": form_obj, - "enable_internal_collaborator": False, - "team_project_roles": set(), - "internal_role_form": None, - } + assert isinstance(result, HTTPSeeOther) def test_cannot_reinvite_role(self, db_request): project = ProjectFactory.create(name="foobar") @@ -7592,13 +7527,9 @@ def test_cannot_reinvite_role(self, db_request): EmailFactory.create(user=new_user, verified=True, primary=True) owner_1 = UserFactory.create(username="owner_1") owner_2 = UserFactory.create(username="owner_2") - owner_1_role = RoleFactory.create( - user=owner_1, project=project, role_name="Owner" - ) - owner_2_role = RoleFactory.create( - user=owner_2, project=project, role_name="Owner" - ) - new_user_invitation = RoleInvitationFactory.create( + RoleFactory.create(user=owner_1, project=project, role_name="Owner") + RoleFactory.create(user=owner_2, project=project, role_name="Owner") + RoleInvitationFactory.create( user=new_user, project=project, invite_status="pending" ) @@ -7650,15 +7581,7 @@ def test_cannot_reinvite_role(self, db_request): ) ] - assert result == { - "project": project, - "roles": {owner_1_role, owner_2_role}, - "invitations": {new_user_invitation}, - "form": form_obj, - "enable_internal_collaborator": False, - "team_project_roles": set(), - "internal_role_form": None, - } + assert isinstance(result, HTTPSeeOther) class TestRevokeRoleInvitation: @@ -8749,6 +8672,7 @@ def test_add_github_oidc_provider_preexisting(self, monkeypatch): add=pretend.call_recorder(lambda o: None), ), remote_addr="0.0.0.0", + path="request-path", ) github_provider_form_obj = pretend.stub( 
@@ -8770,11 +8694,7 @@ def test_add_github_oidc_provider_preexisting(self, monkeypatch): view, "_check_ratelimits", pretend.call_recorder(lambda: None) ) - assert view.add_github_oidc_provider() == { - "oidc_enabled": True, - "project": project, - "github_provider_form": github_provider_form_obj, - } + assert isinstance(view.add_github_oidc_provider(), HTTPSeeOther) assert view.metrics.increment.calls == [ pretend.call( "warehouse.oidc.add_provider.attempt", tags=["provider:GitHub"] @@ -8833,6 +8753,7 @@ def test_add_github_oidc_provider_created(self, monkeypatch): add=pretend.call_recorder(lambda o: setattr(o, "id", "fakeid")), ), remote_addr="0.0.0.0", + path="request-path", ) github_provider_form_obj = pretend.stub( @@ -8860,11 +8781,7 @@ def test_add_github_oidc_provider_created(self, monkeypatch): view, "_check_ratelimits", pretend.call_recorder(lambda: None) ) - assert view.add_github_oidc_provider() == { - "oidc_enabled": True, - "project": project, - "github_provider_form": github_provider_form_obj, - } + assert isinstance(view.add_github_oidc_provider(), HTTPSeeOther) assert view.metrics.increment.calls == [ pretend.call( "warehouse.oidc.add_provider.attempt", tags=["provider:GitHub"] @@ -9131,6 +9048,7 @@ def test_delete_oidc_provider(self, monkeypatch): query=lambda *a: pretend.stub(get=lambda id: provider), ), remote_addr="0.0.0.0", + path="request-path", ) delete_provider_form_obj = pretend.stub( @@ -9153,7 +9071,7 @@ def test_delete_oidc_provider(self, monkeypatch): views.ManageOIDCProviderViews, "default_response", default_response ) - assert view.delete_oidc_provider() == default_response + assert isinstance(view.delete_oidc_provider(), HTTPSeeOther) assert provider not in project.oidc_providers assert view.metrics.increment.calls == [
Confirmation pops up when page is refreshed

After an organization account request has been submitted, if the page is refreshed, a pop-up appears asking whether it should resend the information. To recreate the issue:

1. Click on 'Your organizations'
2. Enter details to create a new organization
3. Click on 'Create'
4. Refresh the page

![project2](https://user-images.githubusercontent.com/37237726/177825505-9612810b-2e79-469c-8fbd-13cdecbfb86b.png)
Priority 3/3

The response from these views should be a redirect, not the form again.

This also occurs with other forms in PyPI. When addressing this bug, we should search through `POST` requests in the code and make sure we are following the [POST-redirect-GET](https://en.wikipedia.org/wiki/Post/Redirect/Get) pattern everywhere.
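A minimal Pyramid-style sketch of the Post/Redirect/Get pattern the patch applies throughout the manage views (the view and form names are illustrative, not the exact warehouse code):

```python
# Post/Redirect/Get: answer a successful POST with a 303 See Other so a
# browser refresh re-issues a harmless GET instead of re-submitting the form.
from pyramid.httpexceptions import HTTPSeeOther


def save_account(self):
    form = SaveAccountForm(self.request.POST)  # illustrative form class
    if form.validate():
        ...  # persist the changes
        self.request.session.flash("Account details updated", queue="success")
        return HTTPSeeOther(self.request.path)  # redirect back to the same page
    # On validation errors, re-render the page with the bound form instead.
    return {**self.default_response, "save_account_form": form}
```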
2022-08-05T06:30:25Z
[]
[]
pypi/warehouse
12,139
pypi__warehouse-12139
[ "12138" ]
604b675e9c554ce7da6d4af8f4019a79282ed864
diff --git a/warehouse/views.py b/warehouse/views.py --- a/warehouse/views.py +++ b/warehouse/views.py @@ -485,6 +485,7 @@ def eligible_projects(self): return set( project.name for project in user_projects(self.request)["projects_requiring_2fa"] + if project.pypi_mandates_2fa ) else: return set()
diff --git a/tests/unit/test_views.py b/tests/unit/test_views.py --- a/tests/unit/test_views.py +++ b/tests/unit/test_views.py @@ -594,11 +594,16 @@ def test_eligible_projects_no_user(self, db_request): def test_eligible_projects_owners_require_2fa(self, db_request): db_request.user = UserFactory.create() + ProjectFactory.create() - project = ProjectFactory.create(owners_require_2fa=True) - RoleFactory.create(user=db_request.user, project=project) + ProjectFactory.create(owners_require_2fa=True) + p1 = ProjectFactory.create(pypi_mandates_2fa=True) + p2 = ProjectFactory.create(owners_require_2fa=True, pypi_mandates_2fa=True) - assert SecurityKeyGiveaway(db_request).eligible_projects == {project.name} + RoleFactory.create(user=db_request.user, project=p1) + RoleFactory.create(user=db_request.user, project=p2) + + assert SecurityKeyGiveaway(db_request).eligible_projects == {p1.name, p2.name} def test_eligible_projects_pypi_mandates_2fa(self, db_request): db_request.user = UserFactory.create()
2FA giveaway lists critical + 2FA required projects, should only list critical

**Describe the bug**

The 2FA key giveaway eligibility says: "Congrats, you may be eligible to receive a promo code! The following projects you maintain are considered critical projects:" But not all of these 15 are critical:

<img width="903" alt="image" src="https://user-images.githubusercontent.com/1324225/186908565-479f41ab-6b23-4506-8469-f164fd49b96e.png">

**Expected behavior**

Only list the critical projects. At https://pypi.org/manage/projects/ only these 4 are listed as critical: prettytable, humanize, ujson, Pillow.

**To Reproduce**

1. Visit https://pypi.org/security-key-giveaway/
2. Click "Log in to determine if you are eligible" and log in

**My Platform**

n/a

**Additional context**

I have 18 projects at https://pypi.org/manage/projects/

* 4 are critical
* 11 I've also set as 2FA required
* looks like these 4+11 are being listed as critical on the promo page, but it should just be the 4
Thanks, looks like this line is incorrect: https://github.com/pypi/warehouse/blob/f2d6fc455a135b3b3ba72539d5c9f4c4f0d48223/warehouse/views.py#L487 It's taking into account all projects that require 2FA, not just the critical ones. We'll probably want to additionally filter this comprehension on `project.pypi_mandates_2fa`: https://github.com/pypi/warehouse/blob/604b675e9c554ce7da6d4af8f4019a79282ed864/warehouse/packaging/models.py#L157
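That is exactly what the merged patch does: it adds the filter so that only projects PyPI itself has marked as critical are counted, regardless of whether their owners also opted in to 2FA. The resulting comprehension:

```python
# Count only critical projects (pypi_mandates_2fa), not projects whose
# owners voluntarily enabled the owners_require_2fa setting.
eligible_projects = {
    project.name
    for project in user_projects(self.request)["projects_requiring_2fa"]
    if project.pypi_mandates_2fa
}
```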
2022-08-26T18:59:39Z
[]
[]
pypi/warehouse
12,149
pypi__warehouse-12149
[ "12009" ]
f21a45ccdfa51b62399f3c1952233ab2332ca1e9
diff --git a/warehouse/csp.py b/warehouse/csp.py --- a/warehouse/csp.py +++ b/warehouse/csp.py @@ -115,6 +115,9 @@ def includeme(config): "https://cdn.jsdelivr.net/npm/[email protected]/", # Hash for v3.2.2 of MathJax tex-svg.js "'sha256-1CldwzdEg2k1wTmf7s5RWVd7NMXI/7nxxjJM2C4DqII='", + # Hash for MathJax inline config + # See warehouse/templates/packaging/detail.html + "'sha256-0POaN8stWYQxhzjKS+/eOfbbJ/u4YHO5ZagJvLpMypo='", ], "style-src": [ SELF,
diff --git a/tests/unit/test_csp.py b/tests/unit/test_csp.py --- a/tests/unit/test_csp.py +++ b/tests/unit/test_csp.py @@ -229,6 +229,7 @@ def test_includeme(): "'sha256-U3hKDidudIaxBDEzwGJApJgPEf2mWk6cfMWghrAa6i0='", "https://cdn.jsdelivr.net/npm/[email protected]/", "'sha256-1CldwzdEg2k1wTmf7s5RWVd7NMXI/7nxxjJM2C4DqII='", + "'sha256-0POaN8stWYQxhzjKS+/eOfbbJ/u4YHO5ZagJvLpMypo='", ], "style-src": [ "'self'",
Support for mathematical expressions in Markdown

**What's the problem this feature will solve?**

GitHub has [started supporting mathematical expressions in Markdown](https://github.blog/2022-05-19-math-support-in-markdown/). It would be nice to have the same possibility on PyPI, since oftentimes the `README.md` on GitHub is used as `long_description` in `setup.py`.

**Describe the solution you'd like**

Implement the same syntax used by GitHub, described [here](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/writing-mathematical-expressions).

**Additional context**

Example PyPI package for which math expressions are not rendered: https://pypi.org/project/sourcespec/ (section: "Theoretical background"). The original `README.md` on GitHub: https://github.com/SeismicSource/sourcespec/blob/master/README.md
Related: https://github.com/pypa/readme_renderer/issues/214

This is blocked on https://github.com/pypa/readme_renderer/issues/214 (but this does work for reStructuredText now since https://github.com/pypi/warehouse/pull/12062)

🥳 I'm very excited to report that the work done to enable MathJax on the frontend now automatically detects the `$$` delimiters that have been added in the Markdown, so no changes to either readme_renderer or warehouse code are necessary! Using the [provided project](https://pypi.org/project/sourcespec/), we can now see that the previously submitted content renders as Math (that I don't fully understand, but who cares?! 😆):

<img width="836" alt="Screen Shot 2022-08-26 at 18 12 00" src="https://user-images.githubusercontent.com/529516/186997238-fb4172ed-cbf3-45f4-812c-810f2577f100.png">

> Note in particular that the $...$ in-line delimiters are not used by default. That is because dollar signs appear too often in non-mathematical settings, which could cause some text to be treated as mathematics unexpectedly. For example, with single-dollar delimiters, "… the cost is $2.50 for the first one, and $2.00 for each additional one …" would cause the phrase "2.50 for the first one, and" to be treated as mathematics since it falls between dollar signs.

We could add them as configuration, if we want: https://docs.mathjax.org/en/latest/input/tex/delimiters.html#tex-delimiters but should consider the potential downsides of a sentence like:

```
To split $100 in half, we calculate $100/2$
```

[GitHub docs](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/writing-mathematical-expressions#writing-dollar-signs-in-line-with-and-within-mathematical-expressions) recommend using HTML to split and make it work, like so:

```html
To split <span>$</span>100 in half, we calculate $100/2$
```

We'd have to ask ourselves what's the likelihood of an equation being inline with an expected dollar sign.

Wow, that's great! Thank you! For the inline dollar sign problem, I think that the benefits of having inline LaTeX outweigh the corner case of a dollar sign in the same sentence. I would say that people who choose to use LaTeX math in their README.md will probably detect this problem when rendering their markdown file locally or on GitHub/GitLab, which will push them to try and find a solution on their own. Moreover, it would be nice, from my point of view, if we match the GitHub syntax, and thus allow inline LaTeX. Great work, anyway! Thanks again!
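Relatedly, the merged patch above allows the inline MathJax configuration through the Content Security Policy by hash rather than by host. For reference, a `'sha256-...'` source token is just the base64 of the SHA-256 digest of the exact inline script text; a small sketch (the script body here is a stand-in, not warehouse's actual inline config):

```python
# Derive a CSP hash-source token for an inline <script> body.
import base64
import hashlib

inline_script = 'MathJax = {tex: {inlineMath: [["$", "$"]]}};'  # stand-in body
digest = hashlib.sha256(inline_script.encode("utf-8")).digest()
print(f"'sha256-{base64.b64encode(digest).decode()}'")
```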
2022-08-30T00:14:49Z
[]
[]
pypi/warehouse
12,197
pypi__warehouse-12197
[ "11647" ]
88b31b59c98713e99332ae4bdd9be4221f67c0b7
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -1227,6 +1227,7 @@ def __init__(self, request): def default_response(self): all_user_organizations = user_organizations(self.request) + # Get list of invites as (organization, token) tuples. organization_invites = ( self.organization_service.get_organization_invites_by_user( self.request.user.id @@ -1237,11 +1238,19 @@ def default_response(self): for organization_invite in organization_invites ] + # Get list of organizations that are approved (True) or pending (None). + organizations = self.organization_service.get_organizations_by_user( + self.request.user.id + ) + organizations = [ + organization + for organization in organizations + if organization.is_approved is not False + ] + return { "organization_invites": organization_invites, - "organizations": self.organization_service.get_organizations_by_user( - self.request.user.id - ), + "organizations": organizations, "organizations_managed": list( organization.name for organization in all_user_organizations["organizations_managed"]
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -2337,7 +2337,7 @@ def test_default_response(self, monkeypatch): ) monkeypatch.setattr(views, "CreateOrganizationForm", create_organization_cls) - organization = pretend.stub(name=pretend.stub()) + organization = pretend.stub(name=pretend.stub(), is_approved=None) user_organizations = pretend.call_recorder( lambda *a, **kw: {
Restrict the number of pending organization requests per user When creating an organization, the create page should disappear once the request has been submitted. It should be replaced with a message saying that the user should wait for the admin and for how many days. We should restrict the number of pending requests per user at any given time. To replicate the issue: 1. Log in 2. Click on 'Your Organizations' 3. Create an organization and submit the request for admin approval It is still possible to immediately create another new organization account. ![org7](https://user-images.githubusercontent.com/37237726/175110467-6203eb55-d583-4300-906f-63557e768a2a.png)
Priority 3/3. Limit to a configurable number.
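A minimal sketch of the requested cap, building only on the `get_organizations_by_user()` call and the `is_approved` semantics (`None` means awaiting admin review) visible in the patch above; the limit constant and helper name are hypothetical, not actual warehouse settings:

```python
# Hypothetical sketch of a configurable cap on pending organization
# requests. MAX_PENDING_REQUESTS and user_below_pending_limit() are
# illustrative names; only get_organizations_by_user() and the
# is_approved semantics come from the patch above.
MAX_PENDING_REQUESTS = 3  # would come from configuration


def user_below_pending_limit(organization_service, user_id):
    """Return True if the user may submit another organization request."""
    pending = [
        organization
        for organization in organization_service.get_organizations_by_user(user_id)
        if organization.is_approved is None  # still awaiting admin review
    ]
    return len(pending) < MAX_PENDING_REQUESTS
```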
2022-09-07T22:18:29Z
[]
[]
pypi/warehouse
12,200
pypi__warehouse-12200
[ "12050" ]
e70b2b39c61f24e60a59cf86377d2b3e5568bed6
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -1386,6 +1386,9 @@ def default_response(self): orgtype=self.organization.orgtype, organization_service=self.organization_service, ), + "save_organization_name_form": SaveOrganizationNameForm( + organization_service=self.organization_service, + ), "active_projects": self.active_projects, } @@ -1471,13 +1474,10 @@ def save_organization_name(self): "manage.organization.settings", organization_name=self.organization.normalized_name, ) + + "#modal-close" ) - else: - for error_list in form.errors.values(): - for error in error_list: - self.request.session.flash(error, queue="error") - return self.default_response + return {**self.default_response, "save_organization_name_form": form} @view_config(request_method="POST", request_param=["confirm_organization_name"]) def delete_organization(self):
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -2654,6 +2654,14 @@ def test_manage_organization( ) monkeypatch.setattr(views, "SaveOrganizationForm", save_organization_cls) + save_organization_name_obj = pretend.stub() + save_organization_name_cls = pretend.call_recorder( + lambda *a, **kw: save_organization_name_obj + ) + monkeypatch.setattr( + views, "SaveOrganizationNameForm", save_organization_name_cls + ) + view = views.ManageOrganizationSettingsViews(organization, db_request) result = view.manage_organization() @@ -2662,6 +2670,7 @@ def test_manage_organization( assert result == { "organization": organization, "save_organization_form": save_organization_obj, + "save_organization_name_form": save_organization_name_obj, "active_projects": view.active_projects, } assert save_organization_cls.calls == [ @@ -2740,6 +2749,14 @@ def test_save_organization_validation_fails( ) monkeypatch.setattr(views, "SaveOrganizationForm", save_organization_cls) + save_organization_name_obj = pretend.stub() + save_organization_name_cls = pretend.call_recorder( + lambda *a, **kw: save_organization_name_obj + ) + monkeypatch.setattr( + views, "SaveOrganizationNameForm", save_organization_name_cls + ) + view = views.ManageOrganizationSettingsViews(organization, db_request) result = view.save_organization() @@ -2819,9 +2836,8 @@ def rename_organization(organization_id, organization_name): result = view.save_organization_name() assert isinstance(result, HTTPSeeOther) - assert ( - result.headers["Location"] - == f"/manage/organization/{organization.normalized_name}/settings/" + assert result.headers["Location"] == ( + f"/manage/organization/{organization.normalized_name}/settings/#modal-close" ) assert organization_service.rename_organization.calls == [ pretend.call(organization.id, "new-name") @@ -2878,7 +2894,10 @@ def rename_organization(organization_id, organization_name): view = views.ManageOrganizationSettingsViews(organization, db_request) result = view.save_organization_name() - assert result == view.default_response + assert result == { + **view.default_response, + "save_organization_name_form": save_organization_name_obj, + } assert organization_service.rename_organization.calls == [] def test_save_organization_name_disable_organizations(self, db_request): @@ -2969,6 +2988,14 @@ def test_delete_organization_with_active_projects( ) monkeypatch.setattr(views, "SaveOrganizationForm", save_organization_cls) + save_organization_name_obj = pretend.stub() + save_organization_name_cls = pretend.call_recorder( + lambda *a, **kw: save_organization_name_obj + ) + monkeypatch.setattr( + views, "SaveOrganizationNameForm", save_organization_name_cls + ) + monkeypatch.setattr( organization_service, "delete_organization",
Show an error when a pre-existing org account name is chosen When changing the org account name, choosing a name that already exists should show an error in the form rather than a transient banner. To recreate the issue: - Click on 'Your organizations' - Select an organization - Click on 'Settings' - Click on 'Change organization account name' - Enter a name for an org that already exists The confirmation box disappears and a banner appears ![test10](https://user-images.githubusercontent.com/37237726/183532975-688f1c52-1dbb-45d8-bc56-ad71d5adc6cb.png)
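The fix in the patch above boils down to one pattern: on validation failure, hand the bound form back to the template instead of flashing each error, so the modal can re-open and render the "name already used" message inline. A condensed sketch of that view logic (paraphrased from the patch; `SaveOrganizationNameForm` comes from `warehouse.manage.forms`):

```python
# Condensed sketch of the fixed view: a failed validation returns the
# bound form in the response context instead of flashing its errors.
from pyramid.httpexceptions import HTTPSeeOther


def save_organization_name(self):
    form = SaveOrganizationNameForm(  # from warehouse.manage.forms
        self.request.POST,
        organization_service=self.organization_service,
    )
    if form.validate():
        # ...perform the rename, then bounce back to the settings page...
        return HTTPSeeOther(
            self.request.route_path(
                "manage.organization.settings",
                organization_name=self.organization.normalized_name,
            )
            + "#modal-close"
        )
    # Merge the failed form over the default context so the template
    # can re-open the modal with field errors rendered inline.
    return {**self.default_response, "save_organization_name_form": form}
```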
2022-09-08T21:58:43Z
[]
[]
pypi/warehouse
12,208
pypi__warehouse-12208
[ "12048" ]
b0096ca484d326da1db3663a87525c526a407d27
diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py --- a/warehouse/manage/forms.py +++ b/warehouse/manage/forms.py @@ -425,7 +425,13 @@ class OrganizationNameMixin: ) def validate_name(self, field): - if self.organization_service.find_organizationid(field.data) is not None: + # Find organization by name. + organization_id = self.organization_service.find_organizationid(field.data) + + # Name is valid if one of the following is true: + # - There is no name conflict with any organization. + # - The name conflict is with the current organization. + if organization_id is not None and organization_id != self.organization_id: raise wtforms.validators.ValidationError( _( "This organization account name has already been used. " @@ -539,9 +545,10 @@ class SaveOrganizationNameForm(OrganizationNameMixin, forms.Form): __params__ = ["name"] - def __init__(self, *args, organization_service, **kwargs): + def __init__(self, *args, organization_service, organization_id=None, **kwargs): super().__init__(*args, **kwargs) self.organization_service = organization_service + self.organization_id = organization_id class SaveOrganizationForm(forms.Form): @@ -640,13 +647,24 @@ class SaveTeamForm(forms.Form): ] ) - def __init__(self, *args, organization_id, organization_service, **kwargs): + def __init__( + self, *args, organization_service, organization_id, team_id=None, **kwargs + ): super().__init__(*args, **kwargs) + self.team_id = team_id self.organization_id = organization_id self.organization_service = organization_service def validate_name(self, field): - if self.organization_service.find_teamid(self.organization_id, field.data): + # Find team by name. + team_id = self.organization_service.find_teamid( + self.organization_id, field.data + ) + + # Name is valid if one of the following is true: + # - There is no name conflict with any team. + # - The name conflict is with the current team. + if team_id is not None and team_id != self.team_id: raise wtforms.validators.ValidationError( _( "This team name has already been used. " diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -1446,6 +1446,7 @@ def save_organization_name(self): form = SaveOrganizationNameForm( self.request.POST, organization_service=self.organization_service, + organization_id=self.organization.id, ) if form.validate(): @@ -1677,8 +1678,8 @@ def default_response(self): "organization": self.organization, "create_team_form": CreateTeamForm( self.request.POST, - organization_id=self.organization.id, organization_service=self.organization_service, + organization_id=self.organization.id, ), } @@ -2316,8 +2317,9 @@ def default_response(self): "team": self.team, "save_team_form": SaveTeamForm( name=self.team.name, - organization_id=self.team.organization_id, organization_service=self.organization_service, + organization_id=self.team.organization_id, + team_id=self.team.id, ), } @@ -2329,8 +2331,9 @@ def manage_team(self): def save_team(self): form = SaveTeamForm( self.request.POST, - organization_id=self.team.organization_id, organization_service=self.organization_service, + organization_id=self.team.organization_id, + team_id=self.team.id, ) if form.validate(): diff --git a/warehouse/utils/organization.py b/warehouse/utils/organization.py --- a/warehouse/utils/organization.py +++ b/warehouse/utils/organization.py @@ -10,7 +10,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from packaging.utils import canonicalize_name from pyramid.httpexceptions import HTTPSeeOther @@ -21,23 +20,27 @@ def confirm_organization( field_name="confirm_organization_name", error_message="Could not delete organization", ): - confirm = request.POST.get(field_name) - organization_name = organization.normalized_name + confirm = request.POST.get(field_name, "").strip() if not confirm: request.session.flash("Confirm the request", queue="error") raise HTTPSeeOther( - request.route_path(fail_route, organization_name=organization_name) + request.route_path( + fail_route, + organization_name=organization.normalized_name, + ) ) - if canonicalize_name(confirm) != organization.normalized_name: + + organization_name = organization.name.strip() + if confirm != organization_name: request.session.flash( - ( - f"{error_message} - " - f"{confirm!r} is not the same as {organization.normalized_name!r}" - ), + f"{error_message} - {confirm!r} is not the same as {organization_name!r}", queue="error", ) raise HTTPSeeOther( - request.route_path(fail_route, organization_name=organization_name) + request.route_path( + fail_route, + organization_name=organization.normalized_name, + ) ) @@ -48,23 +51,27 @@ def confirm_team( field_name="confirm_team_name", error_message="Could not delete team", ): - confirm = request.POST.get(field_name) - organization_name = team.organization.normalized_name - team_name = team.normalized_name + confirm = request.POST.get(field_name, "").strip() if not confirm: request.session.flash("Confirm the request", queue="error") raise HTTPSeeOther( request.route_path( - fail_route, organization_name=organization_name, team_name=team_name + fail_route, + organization_name=team.organization.normalized_name, + team_name=team.normalized_name, ) ) - if confirm.strip() != team.name.strip(): + + team_name = team.name.strip() + if confirm != team_name: request.session.flash( - (f"{error_message} - " f"{confirm!r} is not the same as {team.name!r}"), + f"{error_message} - {confirm!r} is not the same as {team_name!r}", queue="error", ) raise HTTPSeeOther( request.route_path( - fail_route, organization_name=organization_name, team_name=team_name + fail_route, + organization_name=team.organization.normalized_name, + team_name=team.normalized_name, ) ) diff --git a/warehouse/utils/project.py b/warehouse/utils/project.py --- a/warehouse/utils/project.py +++ b/warehouse/utils/project.py @@ -180,18 +180,28 @@ def confirm_project( field_name="confirm_project_name", error_message="Could not delete project", ): - confirm = request.POST.get(field_name) - project_name = project.normalized_name + confirm = request.POST.get(field_name, "").strip() if not confirm: request.session.flash("Confirm the request", queue="error") - raise HTTPSeeOther(request.route_path(fail_route, project_name=project_name)) - if canonicalize_name(confirm) != project.normalized_name: + raise HTTPSeeOther( + request.route_path( + fail_route, + project_name=project.normalized_name, + ) + ) + + project_name = project.name.strip() + if confirm != project_name: request.session.flash( - f"{error_message} - " - + f"{confirm!r} is not the same as {project.normalized_name!r}", + f"{error_message} - {confirm!r} is not the same as {project_name!r}", queue="error", ) - raise HTTPSeeOther(request.route_path(fail_route, project_name=project_name)) + raise HTTPSeeOther( + request.route_path( + fail_route, + project_name=project.normalized_name, + ) + ) def remove_project(project, request, flash=True):
diff --git a/tests/unit/admin/views/test_projects.py b/tests/unit/admin/views/test_projects.py --- a/tests/unit/admin/views/test_projects.py +++ b/tests/unit/admin/views/test_projects.py @@ -440,7 +440,7 @@ def test_sets_limit_with_less_than_minimum(self, db_request): class TestDeleteProject: def test_no_confirm(self): - project = pretend.stub(normalized_name="foo") + project = pretend.stub(name="foo", normalized_name="foo") request = pretend.stub( POST={}, session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), @@ -457,7 +457,7 @@ def test_no_confirm(self): ] def test_wrong_confirm(self): - project = pretend.stub(normalized_name="foo") + project = pretend.stub(name="foo", normalized_name="foo") request = pretend.stub( POST={"confirm_project_name": "bar"}, session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), @@ -485,7 +485,7 @@ def test_deletes_project(self, db_request): db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) - db_request.POST["confirm_project_name"] = project.normalized_name + db_request.POST["confirm_project_name"] = project.name db_request.user = UserFactory.create() views.delete_project(project, db_request) diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -2923,10 +2923,10 @@ def test_save_organization_name( enable_organizations, monkeypatch, ): - organization = OrganizationFactory.create(name="old-name") + organization = OrganizationFactory.create(name="foobar") db_request.POST = { "confirm_current_organization_name": organization.name, - "name": "new-name", + "name": "FooBar", } db_request.route_path = pretend.call_recorder( lambda *a, organization_name, **kw: ( @@ -2981,30 +2981,57 @@ def rename_organization(organization_id, organization_name): f"/manage/organization/{organization.normalized_name}/settings/#modal-close" ) assert organization_service.rename_organization.calls == [ - pretend.call(organization.id, "new-name") + pretend.call(organization.id, "FooBar") ] assert send_email.calls == [ pretend.call( db_request, admins, - organization_name="new-name", - previous_organization_name="old-name", + organization_name="FooBar", + previous_organization_name="foobar", ), pretend.call( db_request, {pyramid_user}, - organization_name="new-name", - previous_organization_name="old-name", + organization_name="FooBar", + previous_organization_name="foobar", ), ] + def test_save_organization_name_wrong_confirm( + self, db_request, organization_service, enable_organizations, monkeypatch + ): + organization = OrganizationFactory.create(name="foobar") + db_request.POST = { + "confirm_current_organization_name": organization.name.upper(), + "name": "FooBar", + } + db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect") + db_request.session = pretend.stub( + flash=pretend.call_recorder(lambda *a, **kw: None) + ) + + view = views.ManageOrganizationSettingsViews(organization, db_request) + with pytest.raises(HTTPSeeOther): + view.save_organization_name() + + assert db_request.session.flash.calls == [ + pretend.call( + ( + "Could not rename organization - " + "'FOOBAR' is not the same as 'foobar'" + ), + queue="error", + ) + ] + def test_save_organization_name_validation_fails( self, db_request, organization_service, enable_organizations, monkeypatch ): - organization = OrganizationFactory.create(name="old-name") + organization = OrganizationFactory.create(name="foobar") 
db_request.POST = { "confirm_current_organization_name": organization.name, - "name": "new-name", + "name": "FooBar", } def rename_organization(organization_id, organization_name): @@ -4871,7 +4898,7 @@ def test_manage_team( def test_save_team(self, db_request, organization_service, enable_organizations): team = TeamFactory.create(name="Team Name") - db_request.POST = MultiDict({"name": "New Team Name"}) + db_request.POST = MultiDict({"name": "Team name"}) db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foo/bar/") view = views.ManageTeamSettingsViews(team, db_request) @@ -4879,13 +4906,22 @@ def test_save_team(self, db_request, organization_service, enable_organizations) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/foo/bar/" - assert team.name == "New Team Name" + assert team.name == "Team name" def test_save_team_validation_fails( self, db_request, organization_service, enable_organizations ): - team = TeamFactory.create(name="Team Name") - db_request.POST = MultiDict({"name": "Team Name"}) + organization = OrganizationFactory.create() + team = TeamFactory.create( + name="Team Name", + organization=organization, + ) + TeamFactory.create( + name="Existing Team Name", + organization=organization, + ) + + db_request.POST = MultiDict({"name": "Existing Team Name"}) view = views.ManageTeamSettingsViews(team, db_request) result = view.save_team() @@ -4897,7 +4933,7 @@ def test_save_team_validation_fails( } assert team.name == "Team Name" assert form.name.errors == [ - ("This team name has already been used. " "Choose a different team name.") + "This team name has already been used. Choose a different team name." ] def test_delete_team( @@ -4964,7 +5000,7 @@ def test_delete_team_wrong_confirm( monkeypatch, ): team = TeamFactory.create(name="Team Name") - db_request.POST = MultiDict({"confirm_team_name": "Wrong Team Name"}) + db_request.POST = MultiDict({"confirm_team_name": "TEAM NAME"}) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) @@ -4978,7 +5014,7 @@ def test_delete_team_wrong_confirm( pretend.call( ( "Could not delete team - " - "'Wrong Team Name' is not the same as 'Team Name'" + "'TEAM NAME' is not the same as 'Team Name'" ), queue="error", ) @@ -5697,6 +5733,7 @@ def test_toggle_2fa_requirement_non_critical( def test_remove_organization_project_no_confirm(self): user = pretend.stub() project = pretend.stub( + name="foo", normalized_name="foo", organization=pretend.stub(owners=[user]), owners=[user], @@ -5724,12 +5761,13 @@ def test_remove_organization_project_no_confirm(self): def test_remove_organization_project_wrong_confirm(self): user = pretend.stub() project = pretend.stub( + name="foo", normalized_name="foo", organization=pretend.stub(owners=[user]), owners=[user], ) request = pretend.stub( - POST={"confirm_remove_organization_project_name": "bar"}, + POST={"confirm_remove_organization_project_name": "FOO"}, user=user, flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), @@ -5748,7 +5786,7 @@ def test_remove_organization_project_wrong_confirm(self): pretend.call( ( "Could not remove project from organization - " - "'bar' is not the same as 'foo'" + "'FOO' is not the same as 'foo'" ), queue="error", ) @@ -5783,7 +5821,7 @@ def test_remove_organization_project_no_current_organization( db_request.POST = MultiDict( { - "confirm_remove_organization_project_name": project.normalized_name, + 
"confirm_remove_organization_project_name": project.name, } ) db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False)) @@ -5857,7 +5895,7 @@ def test_remove_organization_project_no_individual_owner( db_request.POST = MultiDict( { - "confirm_remove_organization_project_name": project.normalized_name, + "confirm_remove_organization_project_name": project.name, } ) db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False)) @@ -5899,7 +5937,7 @@ def test_remove_organization_project(self, monkeypatch, db_request): db_request.POST = MultiDict( { - "confirm_remove_organization_project_name": project.normalized_name, + "confirm_remove_organization_project_name": project.name, } ) db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False)) @@ -5945,6 +5983,7 @@ def test_remove_organization_project(self, monkeypatch, db_request): def test_transfer_organization_project_no_confirm(self): user = pretend.stub() project = pretend.stub( + name="foo", normalized_name="foo", organization=pretend.stub(owners=[user]), ) @@ -5971,11 +6010,12 @@ def test_transfer_organization_project_no_confirm(self): def test_transfer_organization_project_wrong_confirm(self): user = pretend.stub() project = pretend.stub( + name="foo", normalized_name="foo", organization=pretend.stub(owners=[user]), ) request = pretend.stub( - POST={"confirm_transfer_organization_project_name": "bar"}, + POST={"confirm_transfer_organization_project_name": "FOO"}, user=user, flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), @@ -5992,7 +6032,7 @@ def test_transfer_organization_project_wrong_confirm(self): ] assert request.session.flash.calls == [ pretend.call( - "Could not transfer project - 'bar' is not the same as 'foo'", + "Could not transfer project - 'FOO' is not the same as 'foo'", queue="error", ) ] @@ -6030,7 +6070,7 @@ def test_transfer_organization_project_no_current_organization( db_request.POST = MultiDict( { "organization": organization.normalized_name, - "confirm_transfer_organization_project_name": project.normalized_name, + "confirm_transfer_organization_project_name": project.name, } ) db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False)) @@ -6127,7 +6167,7 @@ def test_transfer_organization_project_no_individual_owner( db_request.POST = MultiDict( { "organization": organization.normalized_name, - "confirm_transfer_organization_project_name": project.normalized_name, + "confirm_transfer_organization_project_name": project.name, } ) db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False)) @@ -6198,7 +6238,7 @@ def test_transfer_organization_project_invalid(self, monkeypatch, db_request): db_request.POST = MultiDict( { "organization": "", - "confirm_transfer_organization_project_name": project.normalized_name, + "confirm_transfer_organization_project_name": project.name, } ) db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False)) @@ -6234,7 +6274,7 @@ def test_transfer_organization_project(self, monkeypatch, db_request): db_request.POST = MultiDict( { "organization": organization.normalized_name, - "confirm_transfer_organization_project_name": project.normalized_name, + "confirm_transfer_organization_project_name": project.name, } ) db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False)) @@ -6319,9 +6359,9 @@ def test_delete_project_no_confirm(self): ] def 
test_delete_project_wrong_confirm(self): - project = pretend.stub(normalized_name="foo") + project = pretend.stub(name="foo", normalized_name="foo") request = pretend.stub( - POST={"confirm_project_name": "bar"}, + POST={"confirm_project_name": "FOO"}, flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)), session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), route_path=lambda *a, **kw: "/foo/bar/", @@ -6337,7 +6377,7 @@ def test_delete_project_wrong_confirm(self): ] assert request.session.flash.calls == [ pretend.call( - "Could not delete project - 'bar' is not the same as 'foo'", + "Could not delete project - 'FOO' is not the same as 'foo'", queue="error", ) ] @@ -6401,7 +6441,7 @@ def test_delete_project(self, monkeypatch, db_request): db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) - db_request.POST["confirm_project_name"] = project.normalized_name + db_request.POST["confirm_project_name"] = project.name db_request.user = UserFactory.create() RoleFactory.create(project=project, user=db_request.user, role_name="Owner") @@ -6470,9 +6510,9 @@ def test_destroy_project_docs_no_confirm(self): ] def test_destroy_project_docs_wrong_confirm(self): - project = pretend.stub(normalized_name="foo") + project = pretend.stub(name="foo", normalized_name="foo") request = pretend.stub( - POST={"confirm_project_name": "bar"}, + POST={"confirm_project_name": "FOO"}, session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)), route_path=lambda *a, **kw: "/foo/bar/", ) @@ -6484,7 +6524,7 @@ def test_destroy_project_docs_wrong_confirm(self): assert request.session.flash.calls == [ pretend.call( - "Could not delete project - 'bar' is not the same as 'foo'", + "Could not delete project - 'FOO' is not the same as 'foo'", queue="error", ) ] @@ -6500,7 +6540,7 @@ def test_destroy_project_docs(self, db_request): db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) - db_request.POST["confirm_project_name"] = project.normalized_name + db_request.POST["confirm_project_name"] = project.name db_request.user = UserFactory.create() db_request.task = task @@ -7226,7 +7266,10 @@ def test_delete_project_release_file(self, monkeypatch, db_request): ] def test_delete_project_release_file_no_confirm(self): - release = pretend.stub(version="1.2.3", project=pretend.stub(name="foobar")) + release = pretend.stub( + version="1.2.3", + project=pretend.stub(name="foobar", normalized_name="foobar"), + ) request = pretend.stub( POST={"confirm_project_name": ""}, method="POST", diff --git a/tests/unit/utils/test_organization.py b/tests/unit/utils/test_organization.py --- a/tests/unit/utils/test_organization.py +++ b/tests/unit/utils/test_organization.py @@ -19,7 +19,7 @@ def test_confirm(): - organization = stub(normalized_name="foobar") + organization = stub(name="foobar", normalized_name="foobar") request = stub( POST={"confirm_organization_name": "foobar"}, route_path=call_recorder(lambda *a, **kw: stub()), @@ -33,7 +33,7 @@ def test_confirm(): def test_confirm_no_input(): - organization = stub(normalized_name="foobar") + organization = stub(name="foobar", normalized_name="foobar") request = stub( POST={"confirm_organization_name": ""}, route_path=call_recorder(lambda *a, **kw: "/the-redirect"), @@ -49,7 +49,7 @@ def test_confirm_no_input(): def test_confirm_incorrect_input(): - organization = stub(normalized_name="foobar") + organization = stub(name="foobar", normalized_name="foobar") request = stub( 
POST={"confirm_organization_name": "bizbaz"}, route_path=call_recorder(lambda *a, **kw: "/the-redirect"), diff --git a/tests/unit/utils/test_project.py b/tests/unit/utils/test_project.py --- a/tests/unit/utils/test_project.py +++ b/tests/unit/utils/test_project.py @@ -42,7 +42,7 @@ def test_confirm(): - project = stub(normalized_name="foobar") + project = stub(name="foobar", normalized_name="foobar") request = stub( POST={"confirm_project_name": "foobar"}, route_path=call_recorder(lambda *a, **kw: stub()), @@ -56,7 +56,7 @@ def test_confirm(): def test_confirm_no_input(): - project = stub(normalized_name="foobar") + project = stub(name="foobar", normalized_name="foobar") request = stub( POST={"confirm_project_name": ""}, route_path=call_recorder(lambda *a, **kw: "/the-redirect"), @@ -72,7 +72,7 @@ def test_confirm_no_input(): def test_confirm_incorrect_input(): - project = stub(normalized_name="foobar") + project = stub(name="foobar", normalized_name="foobar") request = stub( POST={"confirm_project_name": "bizbaz"}, route_path=call_recorder(lambda *a, **kw: "/the-redirect"),
No case sensitivity check when changing an organization account name When changing the organization account name, the confirmation box 1. Does not show the current org account name. Similar features show the current name so that it is easy to fill in. 2. The current org account name check is not case-sensitive. ![test8](https://user-images.githubusercontent.com/37237726/183529994-3a445554-d54d-43d0-adb0-b2fb45716b24.png)
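For context on item 2: the old confirmation compared canonicalized names, and `packaging.utils.canonicalize_name` lowercases its input (and normalizes separators), which is exactly what made the check case-insensitive. A tiny self-contained demonstration:

```python
# Why the old confirmation was case-insensitive: canonicalize_name()
# lowercases and normalizes separators before comparing.
from packaging.utils import canonicalize_name

assert canonicalize_name("FooBar") == "foobar"
assert canonicalize_name("FooBar") == canonicalize_name("FOOBAR")

# The fix compares the raw (whitespace-stripped) names instead:
assert "FOOBAR".strip() != "foobar".strip()  # now correctly rejected
```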
2022-09-12T17:58:41Z
[]
[]
pypi/warehouse
12,239
pypi__warehouse-12239
[ "12046" ]
bbfe1df561312c73149987d62cf5485ce174970d
diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py --- a/warehouse/manage/forms.py +++ b/warehouse/manage/forms.py @@ -49,13 +49,9 @@ class TeamProjectRoleNameMixin: team_project_role_name = wtforms.SelectField( "Select permissions", - choices=[ - ("", "Select permissions"), - ("Upload", "Upload"), - ("Administer", "Administer"), - ], + choices=[("", "Select role"), ("Maintainer", "Maintainer"), ("Owner", "Owner")], coerce=lambda string: TeamProjectRoleType(string) if string else None, - validators=[wtforms.validators.DataRequired(message="Select permissions")], + validators=[wtforms.validators.DataRequired(message="Select role")], ) diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -227,7 +227,7 @@ def user_projects(request): request.db.query(Project.id.label("id")) .join(TeamProjectRole.project) .join(teams, TeamProjectRole.team_id == teams.c.id) - .filter(TeamProjectRole.role_name == TeamProjectRoleType.Administer), + .filter(TeamProjectRole.role_name == TeamProjectRoleType.Owner), ) with_sole_owner = with_sole_owner.union( @@ -3927,10 +3927,7 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): if existing_role: request.session.flash( request._( - ( - "Team '${team_name}' already has " - "${role_name} permissions for project" - ), + "Team '${team_name}' already has ${role_name} role for project", mapping={ "team_name": team_name, "role_name": existing_role.role_name.value, @@ -4464,12 +4461,12 @@ def change_team_project_role(project, request, _form_class=ChangeTeamProjectRole .one() ) if ( - role.role_name == TeamProjectRoleType.Administer + role.role_name == TeamProjectRoleType.Owner and request.user in role.team.members and request.user not in role.team.organization.owners ): request.session.flash( - "Cannot remove your own team with Administer permissions", + "Cannot remove your own team as Owner", queue="error", ) else: @@ -4570,14 +4567,12 @@ def delete_team_project_role(project, request): .one() ) removing_self = ( - role.role_name == TeamProjectRoleType.Administer + role.role_name == TeamProjectRoleType.Owner and request.user in role.team.members and request.user not in role.team.organization.owners ) if removing_self: - request.session.flash( - "Cannot remove your own team with Administer permissions", queue="error" - ) + request.session.flash("Cannot remove your own team as Owner", queue="error") else: role_name = role.role_name team = role.team diff --git a/warehouse/organizations/models.py b/warehouse/organizations/models.py --- a/warehouse/organizations/models.py +++ b/warehouse/organizations/models.py @@ -508,8 +508,8 @@ class TeamRole(db.Model): class TeamProjectRoleType(str, enum.Enum): - Administer = "Administer" - Upload = "Upload" + Owner = "Owner" # Granted "Administer" permissions. + Maintainer = "Maintainer" # Granted "Upload" permissions. class TeamProjectRole(db.Model): diff --git a/warehouse/packaging/models.py b/warehouse/packaging/models.py --- a/warehouse/packaging/models.py +++ b/warehouse/packaging/models.py @@ -264,7 +264,8 @@ def __acl__(self): query = query.options(orm.lazyload("team")) for role in query.all(): permissions |= { - (user.id, role.role_name.value) for user in role.team.members + (user.id, "Administer" if role.role_name.value == "Owner" else "Upload") + for user in role.team.members } # Add all organization owners for this project.
diff --git a/tests/common/db/organizations.py b/tests/common/db/organizations.py --- a/tests/common/db/organizations.py +++ b/tests/common/db/organizations.py @@ -156,6 +156,6 @@ class TeamProjectRoleFactory(WarehouseFactory): class Meta: model = TeamProjectRole - role_name = TeamProjectRoleType.Administer + role_name = TeamProjectRoleType.Owner project = factory.SubFactory(ProjectFactory) team = factory.SubFactory(TeamFactory) diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -5034,7 +5034,7 @@ def test_manage_team_projects( project = ProjectFactory.create() TeamProjectRoleFactory.create( - project=project, team=team, role_name=TeamProjectRoleType.Administer + project=project, team=team, role_name=TeamProjectRoleType.Owner ) view = views.ManageTeamProjectsViews(team, db_request) @@ -5533,7 +5533,7 @@ def test_manage_projects(self, db_request): TeamProjectRoleFactory( team=team, project=team_project, - role_name=TeamProjectRoleType.Upload, + role_name=TeamProjectRoleType.Maintainer, ) assert views.manage_projects(db_request) == { @@ -7661,7 +7661,7 @@ def test_post_new_internal_team_role( { "is_team": "true", "team_name": organization_team.name, - "team_project_role_name": "Administer", + "team_project_role_name": "Owner", "username": "", "role_name": "", } @@ -7693,7 +7693,7 @@ def test_post_new_internal_team_role( team=organization_team, submitter=db_request.user, project_name=organization_project.name, - role="Administer", + role="Owner", ) ] assert send_added_as_team_collaborator_email.calls == [ @@ -7703,7 +7703,7 @@ def test_post_new_internal_team_role( team=organization_team, submitter=db_request.user, project_name=organization_project.name, - role="Administer", + role="Owner", ) ] assert isinstance(result, HTTPSeeOther) @@ -7720,7 +7720,7 @@ def test_post_duplicate_internal_team_role( { "is_team": "true", "team_name": organization_team.name, - "team_project_role_name": "Administer", + "team_project_role_name": "Owner", "username": "", "role_name": "", } @@ -7732,7 +7732,7 @@ def test_post_duplicate_internal_team_role( team_project_role = TeamProjectRoleFactory.create( team=organization_team, project=organization_project, - role_name=TeamProjectRoleType.Administer, + role_name=TeamProjectRoleType.Owner, ) result = views.manage_project_roles(organization_project, db_request) @@ -7743,10 +7743,7 @@ def test_post_duplicate_internal_team_role( assert team_project_role == db_request.db.query(TeamProjectRole).one() assert db_request.session.flash.calls == [ pretend.call( - ( - f"Team '{organization_team.name}' already has " - "Administer permissions for project" - ), + f"Team '{organization_team.name}' already has Owner role for project", queue="error", ) ] @@ -7772,7 +7769,7 @@ def test_post_new_internal_role( { "is_team": "false", "team_name": "", - "team_project_role_name": "Administer", + "team_project_role_name": "Owner", "username": organization_member.username, "role_name": "Owner", } @@ -8651,9 +8648,9 @@ def test_change_role( role = TeamProjectRoleFactory.create( team=organization_team, project=organization_project, - role_name=TeamProjectRoleType.Administer, + role_name=TeamProjectRoleType.Owner, ) - new_role_name = TeamProjectRoleType.Upload + new_role_name = TeamProjectRoleType.Maintainer db_request.method = "POST" db_request.POST = MultiDict( @@ -8718,7 +8715,7 @@ def test_change_role( ) assert entry.name == organization_project.name - assert entry.action == f"change 
Administer {organization_team.name} to Upload" + assert entry.action == f"change Owner {organization_team.name} to Maintainer" assert entry.submitted_by == db_request.user assert entry.submitted_from == db_request.remote_addr @@ -8747,7 +8744,7 @@ def test_change_missing_role(self, db_request, organization_project): db_request.method = "POST" db_request.POST = MultiDict( - {"role_id": missing_role_id, "team_project_role_name": "Administer"} + {"role_id": missing_role_id, "team_project_role_name": "Owner"} ) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) @@ -8772,13 +8769,13 @@ def test_change_own_owner_role( role = TeamProjectRoleFactory.create( team=organization_team, project=organization_project, - role_name=TeamProjectRoleType.Administer, + role_name=TeamProjectRoleType.Owner, ) db_request.method = "POST" db_request.user = organization_member db_request.POST = MultiDict( - {"role_id": role.id, "team_project_role_name": "Upload"} + {"role_id": role.id, "team_project_role_name": "Maintainer"} ) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) @@ -8788,9 +8785,7 @@ def test_change_own_owner_role( result = views.change_team_project_role(organization_project, db_request) assert db_request.session.flash.calls == [ - pretend.call( - "Cannot remove your own team with Administer permissions", queue="error" - ) + pretend.call("Cannot remove your own team as Owner", queue="error") ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect" @@ -8841,7 +8836,7 @@ def test_delete_role( role = TeamProjectRoleFactory.create( team=organization_team, project=organization_project, - role_name=TeamProjectRoleType.Administer, + role_name=TeamProjectRoleType.Owner, ) db_request.method = "POST" @@ -8903,7 +8898,7 @@ def test_delete_role( ) assert entry.name == organization_project.name - assert entry.action == f"remove Administer {organization_team.name}" + assert entry.action == f"remove Owner {organization_team.name}" assert entry.submitted_by == db_request.user assert entry.submitted_from == db_request.remote_addr @@ -8935,7 +8930,7 @@ def test_delete_own_owner_role( role = TeamProjectRoleFactory.create( team=organization_team, project=organization_project, - role_name=TeamProjectRoleType.Administer, + role_name=TeamProjectRoleType.Owner, ) db_request.method = "POST" @@ -8949,9 +8944,7 @@ def test_delete_own_owner_role( result = views.delete_team_project_role(organization_project, db_request) assert db_request.session.flash.calls == [ - pretend.call( - "Cannot remove your own team with Administer permissions", queue="error" - ) + pretend.call("Cannot remove your own team as Owner", queue="error") ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect" diff --git a/tests/unit/packaging/test_models.py b/tests/unit/packaging/test_models.py --- a/tests/unit/packaging/test_models.py +++ b/tests/unit/packaging/test_models.py @@ -138,7 +138,7 @@ def test_acl(self, db_session): team = DBTeamFactory.create() owner4 = DBTeamRoleFactory.create(team=team) DBTeamProjectRoleFactory.create( - team=team, project=project, role_name=TeamProjectRoleType.Administer + team=team, project=project, role_name=TeamProjectRoleType.Owner ) acls = []
Assigning role/permission to a team or collaborator Assigning a role/permission to a team should work the same way as assigning one to a collaborator. To recreate the issue: - Click on Your organization - Select Project - Click on Collaborators 1. When selecting a team under Collaborators, there is a drop-down list for teams. There is no drop-down list for internal collaborators. 2. Teams are assigned Upload/Administer permissions while internal collaborators are assigned Maintainer/Owner roles. Could it be changed so that both are assigned either a role or a permission? 3. The view of teams and internal/external collaborators for a project is confusing. ![user_testing](https://user-images.githubusercontent.com/37237726/183527305-08364a8b-85fa-4b33-9750-559a8b10d085.png)
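The patch above resolves item 2 by renaming team project roles to match the collaborator roles while keeping the underlying permissions; a condensed sketch of that mapping (the enum mirrors the patch, `permission_for()` is an illustrative helper):

```python
# Condensed from the patch: teams and collaborators now share the same
# role vocabulary (Owner/Maintainer); the ACL layer still translates
# those roles into the Administer/Upload permissions.
import enum


class TeamProjectRoleType(str, enum.Enum):
    Owner = "Owner"            # granted "Administer" permissions
    Maintainer = "Maintainer"  # granted "Upload" permissions


def permission_for(role: TeamProjectRoleType) -> str:
    """Illustrative helper mapping a role to its granted permission."""
    return "Administer" if role is TeamProjectRoleType.Owner else "Upload"


assert permission_for(TeamProjectRoleType.Maintainer) == "Upload"
```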
2022-09-20T04:29:39Z
[]
[]
pypi/warehouse
12,241
pypi__warehouse-12241
[ "12055" ]
546a3cd52e4c8e3fa01b1c7c4b45834c682d379d
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -3153,6 +3153,17 @@ def remove_organization_project(project, request): queue="success", ) + return HTTPSeeOther( + request.route_path( + "manage.organization.projects", + organization_name=organization.normalized_name, + ) + ) + + request.session.flash( + ("Could not remove project from organization - no organization found"), + queue="error", + ) return HTTPSeeOther( request.route_path("manage.project.settings", project_name=project.name) )
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -5912,7 +5912,12 @@ def test_remove_organization_project_no_current_organization( result = views.remove_organization_project(project, db_request) - assert db_request.session.flash.calls == [] + assert db_request.session.flash.calls == [ + pretend.call( + ("Could not remove project from organization - no organization found"), + queue="error", + ) + ] assert db_request.route_path.calls == [ pretend.call("manage.project.settings", project_name="foo") ] @@ -6035,7 +6040,10 @@ def test_remove_organization_project(self, monkeypatch, db_request): pretend.call("Removed the project 'foo' from 'bar'", queue="success") ] assert db_request.route_path.calls == [ - pretend.call("manage.project.settings", project_name="foo") + pretend.call( + "manage.organization.projects", + organization_name=project.organization.normalized_name, + ) ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect"
Removing a project from an organization leads to 403 error If the user is not an Owner of the project, removing the project from an organization leads to an error. To recreate the issue: - Click on Your Projects - Select a Project and click on Manage - Click on Settings - Click on Remove Project - Enter the Project name - Click on Remove Project The confirmation redirects to /manage/project/project_name/settings/, which the user can no longer access since they are no longer an Owner of the project. ![test11](https://user-images.githubusercontent.com/37237726/183537333-67d475d1-5ada-428e-ba29-bfeb5ea5189b.png) The URL will have to be changed to the home page to see the confirmation message that the project has been removed from the organization. ![test12](https://user-images.githubusercontent.com/37237726/183537338-2a019437-e898-4dfe-8b6e-8e013521d3bc.png)
@s-mm this ticket mentions redirecting to the home page. I was thinking that a redirect to the Organization Project Management page seems appropriate? Maybe this is what you had in mind? Please have a look and let me know if this makes sense or if you were envisioning something different. ![image](https://user-images.githubusercontent.com/76828663/190928687-6cba0c98-00a2-41f1-9ad1-b79a9566eee3.png) @sterbo Your suggestion is fine.
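A condensed sketch of the redirect the patch adds: after a successful removal, the user lands on the organization's projects page (which they can still view) rather than the project settings page (which now returns a 403 for them). The function name is illustrative; the route and call pattern come from the patch:

```python
# Condensed from the patch: redirect to the organization's projects page
# on success; the old target (project settings) 403s once the user is
# no longer an owner of the project.
from pyramid.httpexceptions import HTTPSeeOther


def redirect_after_removal(request, organization):
    return HTTPSeeOther(
        request.route_path(
            "manage.organization.projects",
            organization_name=organization.normalized_name,
        )
    )
```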
2022-09-20T18:22:19Z
[]
[]
pypi/warehouse
12,250
pypi__warehouse-12250
[ "12045" ]
425d6fe63a34e1be3c51579c504c57e43a5b4e19
diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py --- a/warehouse/manage/forms.py +++ b/warehouse/manage/forms.py @@ -604,19 +604,18 @@ class CreateOrganizationForm(SaveOrganizationNameForm, SaveOrganizationForm): __params__ = SaveOrganizationNameForm.__params__ + SaveOrganizationForm.__params__ -class CreateTeamRoleForm(UsernameMixin, forms.Form): +class CreateTeamRoleForm(forms.Form): + + username = wtforms.SelectField( + "Select user", + choices=[("", "Select user")], + default="", # Set default to avoid error when there are no user choices. + validators=[wtforms.validators.InputRequired()], + ) + def __init__(self, *args, user_choices, **kwargs): super().__init__(*args, **kwargs) - self.user_choices = user_choices - - def validate_username(self, field): - if field.data not in self.user_choices: - raise wtforms.validators.ValidationError( - _( - "No organization owner, manager, or member found " - "with that username. Please try again." - ) - ) + self.username.choices += [(name, name) for name in sorted(user_choices)] class SaveTeamForm(forms.Form): diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -2491,6 +2491,7 @@ def __init__(self, team, request): + organization_managers(self.request, self.team.organization) + organization_members(self.request, self.team.organization) ) + if user not in self.team.members ) @property @@ -2516,24 +2517,10 @@ def create_team_role(self): if not form.validate(): return default_response - # Check for existing role. + # Add user to team. username = form.username.data role_name = TeamRoleType.Member user_id = self.user_service.find_userid(username) - existing_role = self.organization_service.get_team_role_by_user( - self.team.id, user_id - ) - if existing_role: - self.request.session.flash( - self.request._( - "User '${username}' is already a team member", - mapping={"username": username}, - ), - queue="error", - ) - return default_response - - # Add user to team. role = self.organization_service.add_team_role( team_id=self.team.id, user_id=user_id, diff --git a/warehouse/organizations/interfaces.py b/warehouse/organizations/interfaces.py --- a/warehouse/organizations/interfaces.py +++ b/warehouse/organizations/interfaces.py @@ -232,11 +232,6 @@ def get_team_role(team_role_id): Return the team role object that represents the given team role id, """ - def get_team_role_by_user(team_id, user_id): - """ - Gets an team role for a specified team and user - """ - def add_team_role(team_id, user_id, role_name): """ Add the team role object to a team for a specified team id and user id diff --git a/warehouse/organizations/services.py b/warehouse/organizations/services.py --- a/warehouse/organizations/services.py +++ b/warehouse/organizations/services.py @@ -569,24 +569,6 @@ def get_team_role(self, team_role_id): """ return self.db.query(TeamRole).get(team_role_id) - def get_team_role_by_user(self, team_id, user_id): - """ - Gets a team role for a specified team and user - """ - try: - team_role = ( - self.db.query(TeamRole) - .filter( - TeamRole.team_id == team_id, - TeamRole.user_id == user_id, - ) - .one() - ) - except NoResultFound: - return - - return team_role - def get_team_roles(self, team_id): """ Gets a list of organization roles for a specified org
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -5253,11 +5253,7 @@ def test_create_team_role_duplicate_member( form = result["form"] assert organization_service.get_team_roles(team.id) == [role] - assert db_request.session.flash.calls == [ - pretend.call( - f"User '{member.username}' is already a team member", queue="error" - ) - ] + assert db_request.session.flash.calls == [] assert result == { "team": team, "roles": [role], @@ -5304,12 +5300,7 @@ def test_create_team_role_not_a_member( "form": form, } - assert form.username.errors == [ - ( - "No organization owner, manager, or member found " - "with that username. Please try again." - ) - ] + assert form.username.errors == ["Not a valid choice."] def test_delete_team_role( self,
No user found in the organization When adding a user to a team and that user is not found in the organization, the owner/manager - should be able to click on a link that goes to a page listing org members - the Owner should be able to invite the user to join the team, and by extension the org, automatically To recreate the issue: - Select an organization - Click on 'Teams' - Select a team - Under 'Add member', enter the name of a user who is not a member of the org ![test6](https://user-images.githubusercontent.com/37237726/183525334-c3ea746b-03f8-4964-950c-caccebce9db3.png)
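The patch above sidesteps the "user not found" dead end by replacing the free-text username field with a dropdown that only offers organization members. Condensed sketch; warehouse's own base form class is swapped for plain `wtforms.Form` to keep it self-contained:

```python
# Condensed from the patch: the username becomes a SelectField populated
# with organization owners/managers/members, so a user outside the
# organization cannot be submitted at all.
import wtforms


class CreateTeamRoleForm(wtforms.Form):
    username = wtforms.SelectField(
        "Select user",
        choices=[("", "Select user")],
        default="",  # avoids an error when there are no user choices
        validators=[wtforms.validators.InputRequired()],
    )

    def __init__(self, *args, user_choices, **kwargs):
        super().__init__(*args, **kwargs)
        # Extend the placeholder with the real, sorted member names.
        self.username.choices += [(name, name) for name in sorted(user_choices)]
```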
2022-09-22T21:50:49Z
[]
[]
pypi/warehouse
12,291
pypi__warehouse-12291
[ "12053" ]
266a84b54e868da4734a2470d05fafc547e8d909
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -133,6 +133,7 @@ from warehouse.organizations.models import ( Organization, OrganizationInvitationStatus, + OrganizationProject, OrganizationRole, OrganizationRoleType, OrganizationType, @@ -3340,12 +3341,49 @@ def transfer_organization_project(project, request): def get_user_role_in_project(project, user, request): - return ( - request.db.query(Role) - .filter(Role.user == user, Role.project == project) - .one() - .role_name - ) + try: + return ( + request.db.query(Role) + .filter(Role.user == user, Role.project == project) + .one() + .role_name + ) + except NoResultFound: + # No project role found so check for Organization roles + return get_user_role_in_organization_project(project, user, request) + + +def get_user_role_in_organization_project(project, user, request): + try: + # If this is an organzation project check to see if user is Org Owner + role_name = ( + request.db.query(OrganizationRole) + .join( + OrganizationProject, + OrganizationProject.organization_id == OrganizationRole.organization_id, + ) + .filter( + OrganizationRole.user == user, + OrganizationProject.project == project, + OrganizationRole.role_name == OrganizationRoleType.Owner, + ) + .one() + .role_name + ) + except NoResultFound: + # Last but not least check if this is a Team Project and user has a team role + role_name = ( + request.db.query(TeamProjectRole) + .join(TeamRole, TeamRole.team_id == TeamProjectRole.team_id) + .filter( + TeamRole.user == user, + TeamProjectRole.project == project, + ) + .one() + .role_name + ) + + return role_name.value @view_config(
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -6676,6 +6676,45 @@ def test_get_user_role_in_project_single_role_maintainer(self, db_request): res = views.get_user_role_in_project(project, db_request.user, db_request) assert res == "Maintainer" + def test_get_user_role_in_project_org_owner(self, db_request): + organization = OrganizationFactory.create(name="baz") + project = ProjectFactory.create(name="foo") + OrganizationProjectFactory.create(organization=organization, project=project) + db_request.user = UserFactory.create() + OrganizationRoleFactory.create( + organization=organization, user=db_request.user, role_name="Owner" + ) + db_request.session = pretend.stub( + flash=pretend.call_recorder(lambda *a, **kw: None), + ) + + res = views.get_user_role_in_project(project, db_request.user, db_request) + assert res == "Owner" + + def test_get_user_role_in_project_team_project_owner(self, db_request): + organization = OrganizationFactory.create(name="baz") + team = TeamFactory(organization=organization) + project = ProjectFactory.create(name="foo") + OrganizationProjectFactory.create(organization=organization, project=project) + db_request.user = UserFactory.create() + OrganizationRoleFactory.create( + organization=organization, + user=db_request.user, + role_name=OrganizationRoleType.Member, + ) + TeamRoleFactory.create(team=team, user=db_request.user) + TeamProjectRoleFactory.create( + team=team, + project=project, + role_name=TeamProjectRoleType.Owner, + ) + db_request.session = pretend.stub( + flash=pretend.call_recorder(lambda *a, **kw: None), + ) + + res = views.get_user_role_in_project(project, db_request.user, db_request) + assert res == "Owner" + def test_delete_project(self, monkeypatch, db_request): project = ProjectFactory.create(name="foo")
Project deletion raises an error Deleting a project in an organization leads to an error: - Click on 'Your Projects' - Select a project and click on 'Manage' - Click on 'Settings' - Click on 'Delete project' - Enter the project name and click on 'Delete project' [screencapture-c7f2-84-64-37-99-eu-ngrok-io-manage-project-tox-parallel-delete-project-2022-08-08-20_21_44.pdf](https://github.com/pypi/warehouse/files/9285717/screencapture-c7f2-84-64-37-99-eu-ngrok-io-manage-project-tox-parallel-delete-project-2022-08-08-20_21_44.pdf)
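The failure is consistent with `Query.one()` raising `NoResultFound` when the user has no direct `Role` row on the project (their access comes through the organization or a team). The patch wraps that lookup and falls back to organization- and team-level roles; condensed (`Role` and the fallback helper are from the warehouse codebase, as shown in the patch above):

```python
# Condensed from the patch: .one() raises NoResultFound when the user
# has no direct Role row for the project, so the lookup now falls back
# instead of letting the exception propagate to the view.
from sqlalchemy.orm.exc import NoResultFound


def get_user_role_in_project(project, user, request):
    try:
        return (
            request.db.query(Role)  # Role: warehouse.packaging.models
            .filter(Role.user == user, Role.project == project)
            .one()
            .role_name
        )
    except NoResultFound:
        # Organization owners and team roles grant access without a Role row.
        return get_user_role_in_organization_project(project, user, request)
```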
2022-09-30T04:58:49Z
[]
[]
pypi/warehouse
12,307
pypi__warehouse-12307
[ "11625" ]
257825d65afb72e4967c115663b0ab22a37308ea
diff --git a/warehouse/accounts/forms.py b/warehouse/accounts/forms.py --- a/warehouse/accounts/forms.py +++ b/warehouse/accounts/forms.py @@ -455,26 +455,3 @@ def validate_username_or_email(self, field): class ResetPasswordForm(NewPasswordMixin, forms.Form): pass - - -class TitanPromoCodeForm(forms.Form): - country = wtforms.SelectField( - "Select destination country", - choices=[ - ("", "Select destination country"), - ("Austria", "Austria"), - ("Belgium", "Belgium"), - ("Canada", "Canada"), - ("France", "France"), - ("Germany", "Germany"), - ("Italy", "Italy"), - ("Japan", "Japan"), - ("Spain", "Spain"), - ("Switzerland", "Switzerland"), - ("United Kingdom", "United Kingdom"), - ("United States", "United States"), - ], - validators=[ - wtforms.validators.DataRequired(message="Select destination country") - ], - ) diff --git a/warehouse/accounts/models.py b/warehouse/accounts/models.py --- a/warehouse/accounts/models.py +++ b/warehouse/accounts/models.py @@ -281,18 +281,3 @@ class ProhibitedUserName(db.Model): ) prohibited_by = orm.relationship(User) comment = Column(Text, nullable=False, server_default="") - - -class TitanPromoCode(db.Model): - __tablename__ = "user_titan_codes" - - user_id = Column( - UUID(as_uuid=True), - ForeignKey("users.id", deferrable=True, initially="DEFERRED"), - nullable=True, - index=True, - unique=True, - ) - code = Column(String, nullable=False, unique=True) - created = Column(DateTime, nullable=False, server_default=sql.func.now()) - distributed = Column(DateTime, nullable=True) diff --git a/warehouse/migrations/versions/90f6ee9298db_drop_titan_promo_code_table.py b/warehouse/migrations/versions/90f6ee9298db_drop_titan_promo_code_table.py new file mode 100644 --- /dev/null +++ b/warehouse/migrations/versions/90f6ee9298db_drop_titan_promo_code_table.py @@ -0,0 +1,33 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Drop titan_promo_code table + +Revision ID: 90f6ee9298db +Revises: d0f67adbcb80 +Create Date: 2022-10-03 18:48:39.327937 +""" + + +from alembic import op + +revision = "90f6ee9298db" +down_revision = "d0f67adbcb80" + + +def upgrade(): + op.drop_index("ix_user_titan_codes_user_id", table_name="user_titan_codes") + op.drop_table("user_titan_codes") + + +def downgrade(): + raise RuntimeError("Can't roll back") diff --git a/warehouse/views.py b/warehouse/views.py --- a/warehouse/views.py +++ b/warehouse/views.py @@ -12,7 +12,6 @@ import collections -import datetime import re import elasticsearch @@ -40,13 +39,11 @@ ) from sqlalchemy import func from sqlalchemy.orm import aliased, joinedload -from sqlalchemy.orm.exc import NoResultFound from sqlalchemy.sql import exists, expression from trove_classifiers import deprecated_classifiers, sorted_classifiers from warehouse.accounts import REDIRECT_FIELD_NAME -from warehouse.accounts.forms import TitanPromoCodeForm -from warehouse.accounts.models import TitanPromoCode, User +from warehouse.accounts.models import User from warehouse.cache.http import add_vary, cache_control from warehouse.cache.origin import origin_cache from warehouse.classifiers.models import Classifier @@ -466,69 +463,9 @@ class SecurityKeyGiveaway: def __init__(self, request): self.request = request - @property - def form(self): - return TitanPromoCodeForm(**self.request.POST) - - @property - def codes_available(self): - return ( - self.request.db.query(TitanPromoCode) - .filter(TitanPromoCode.user_id.is_(None)) - .count() - ) > 0 - - @property - def promo_code(self): - if self.request.user: - try: - return ( - self.request.db.query(TitanPromoCode).filter( - TitanPromoCode.user_id == self.request.user.id - ) - ).one() - except NoResultFound: - pass - return None - @property def default_response(self): - codes_available = self.codes_available - promo_code = self.promo_code - has_webauthn = self.request.user and self.request.user.has_webauthn - is_too_new = ( - self.request.user - and self.request.user.date_joined is not None - and self.request.user.date_joined - > datetime.datetime(2022, 9, 23, 20, 20, 0, 0) - ) - - eligible = ( - codes_available and not has_webauthn and not promo_code and not is_too_new - ) - - if not codes_available: - reason_ineligible = "At this time there are no keys available" - elif is_too_new: - reason_ineligible = "Your account was created too recently" - elif has_webauthn: - reason_ineligible = ( - "You already have two-factor authentication enabled with a hardware " - "security key" - ) - elif promo_code: - reason_ineligible = "Promo code has already been generated" - else: - reason_ineligible = None - - return { - "eligible": eligible, - "reason_ineligible": reason_ineligible, - "form": self.form, - "codes_available": self.codes_available, - "promo_code": self.promo_code, - "REDIRECT_FIELD_NAME": REDIRECT_FIELD_NAME, - } + return {} @view_config(request_method="GET") def security_key_giveaway(self): @@ -539,33 +476,6 @@ def security_key_giveaway(self): return self.default_response - @view_config(request_method="POST") - def security_key_giveaway_submit(self): - if not self.request.registry.settings.get( - "warehouse.two_factor_mandate.available" - ): - raise HTTPNotFound - - default_response = self.default_response - - if not self.form.validate(): - self.request.session.flash("Form is not valid") - elif not default_response["eligible"]: - self.request.session.flash(default_response["reason_ineligible"]) - else: - # The form is valid, assign a 
promo code to the user - promo_code = ( - self.request.db.query(TitanPromoCode).filter( - TitanPromoCode.user_id.is_(None) - ) - ).first() - promo_code.user_id = self.request.user.id - promo_code.distributed = datetime.datetime.now() - # Flush so the promo code is available for the response - self.request.db.flush() - default_response["promo_code"] = promo_code - return default_response - @view_config( route_name="includes.current-user-indicator",
diff --git a/tests/unit/test_views.py b/tests/unit/test_views.py --- a/tests/unit/test_views.py +++ b/tests/unit/test_views.py @@ -11,7 +11,6 @@ # limitations under the License. import datetime -import uuid import elasticsearch import pretend @@ -27,11 +26,8 @@ from webob.multidict import MultiDict from warehouse import views -from warehouse.accounts.forms import TitanPromoCodeForm -from warehouse.accounts.models import TitanPromoCode from warehouse.errors import WarehouseDenied from warehouse.views import ( - REDIRECT_FIELD_NAME, SecurityKeyGiveaway, current_user_indicator, flash_messages, @@ -561,122 +557,8 @@ def test_invalid(self): class TestSecurityKeyGiveaway: - def test_form(self): - country = "United States" - request = pretend.stub(POST={"country": country}) - form = SecurityKeyGiveaway(request).form - - assert isinstance(form, TitanPromoCodeForm) - assert form.country.data == country - - def test_codes_available_no_codes(self, db_request): - assert SecurityKeyGiveaway(db_request).codes_available is False - - def test_codes_available_no_unused_codes(self, db_request): - db_request.db.add(TitanPromoCode(code="foo", user_id=str(uuid.uuid4()))) - - assert SecurityKeyGiveaway(db_request).codes_available is False - - def test_codes_available(self, db_request): - db_request.db.add(TitanPromoCode(code="foo")) - - assert SecurityKeyGiveaway(db_request).codes_available is True - - def test_promo_code_no_user(self, db_request): - db_request.user = None - assert SecurityKeyGiveaway(db_request).promo_code is None - - def test_promo_code_no_codes(self, db_request): - db_request.user = UserFactory.create() - assert SecurityKeyGiveaway(db_request).promo_code is None - - def test_promo_code(self, db_request): - db_request.user = UserFactory.create() - code = TitanPromoCode(code="foo", user_id=db_request.user.id) - db_request.db.add(code) - assert SecurityKeyGiveaway(db_request).promo_code == code - - @pytest.mark.parametrize( - "codes_available, promo_code, user, eligible, reason_ineligible", # noqa - [ - (True, None, None, True, None), - ( # A very old user without date_joined - True, - None, - pretend.stub( - has_webauthn=False, - date_joined=None, - ), - True, - None, - ), - ( - False, - None, - pretend.stub( - has_webauthn=False, - date_joined=datetime.datetime(2021, 9, 23, 20, 20, 0, 0), - ), - False, - "At this time there are no keys available", - ), - ( - True, - None, - pretend.stub( - has_webauthn=False, - date_joined=datetime.datetime(2022, 9, 24, 20, 20, 0, 0), # Too new - ), - False, - "Your account was created too recently", - ), - ( - True, - None, - pretend.stub( - has_webauthn=True, - date_joined=datetime.datetime(2021, 9, 23, 20, 20, 0, 0), - ), - False, - "You already have two-factor authentication enabled with a hardware " - "security key", - ), - ( - True, - pretend.stub(), - pretend.stub( - has_webauthn=False, - date_joined=datetime.datetime(2021, 9, 23, 20, 20, 0, 0), - ), - False, - "Promo code has already been generated", - ), - ], - ) - def test_default_response( - self, - codes_available, - promo_code, - user, - eligible, - reason_ineligible, - monkeypatch, - ): - request = pretend.stub(user=user) - SecurityKeyGiveaway.codes_available = property(lambda a: codes_available) - SecurityKeyGiveaway.promo_code = property(lambda a: promo_code) - form = pretend.stub() - SecurityKeyGiveaway.form = property(lambda a: form) - ins = SecurityKeyGiveaway(request) - - assert ins.default_response == { - "eligible": eligible, - "reason_ineligible": reason_ineligible, - "form": ins.form, 
- "codes_available": codes_available, - "promo_code": promo_code, - "REDIRECT_FIELD_NAME": REDIRECT_FIELD_NAME, - } + def test_default_response(self): + assert SecurityKeyGiveaway(pretend.stub()).default_response == {} def test_security_key_giveaway_not_found(self): request = pretend.stub(registry=pretend.stub(settings={})) @@ -694,65 +576,3 @@ def test_security_key_giveaway(self): SecurityKeyGiveaway.default_response = default_response assert SecurityKeyGiveaway(request).security_key_giveaway() == default_response - - def test_security_key_giveaway_submit_not_found(self): - request = pretend.stub(registry=pretend.stub(settings={})) - - with pytest.raises(HTTPNotFound): - SecurityKeyGiveaway(request).security_key_giveaway_submit() - - def test_security_key_giveaway_submit_invalid_form(self): - request = pretend.stub( - registry=pretend.stub( - settings={"warehouse.two_factor_mandate.available": True} - ), - session=pretend.stub(flash=pretend.call_recorder(lambda a: None)), - ) - default_response = pretend.stub() - SecurityKeyGiveaway.default_response = default_response - form = pretend.stub(validate=lambda: False) - SecurityKeyGiveaway.form = property(lambda a: form) - - assert ( - SecurityKeyGiveaway(request).security_key_giveaway_submit() - == default_response - ) - assert request.session.flash.calls == [pretend.call("Form is not valid")] - - def test_security_key_giveaway_submit_ineligible(self): - request = pretend.stub( - registry=pretend.stub( - settings={"warehouse.two_factor_mandate.available": True} - ), - session=pretend.stub(flash=pretend.call_recorder(lambda a: None)), - ) - reason_ineligible = pretend.stub() - default_response = {"eligible": False, "reason_ineligible": reason_ineligible} - SecurityKeyGiveaway.default_response = default_response - form = pretend.stub(validate=lambda: True) - SecurityKeyGiveaway.form = property(lambda a: form) - - assert ( - SecurityKeyGiveaway(request).security_key_giveaway_submit() - == default_response - ) - assert request.session.flash.calls == [pretend.call(reason_ineligible)] - - def test_security_key_giveaway_submit(self, db_request): - db_request.registry = pretend.stub( - settings={"warehouse.two_factor_mandate.available": True} - ) - db_request.session = pretend.stub(flash=pretend.call_recorder(lambda a: None)) - db_request.user = UserFactory.create() - promo_code = TitanPromoCode(code="foo") - db_request.db.add(promo_code) - - default_response = {"eligible": True} - SecurityKeyGiveaway.default_response = default_response - form = pretend.stub(validate=lambda: True) - SecurityKeyGiveaway.form = property(lambda a: form) - - assert ( - SecurityKeyGiveaway(db_request).security_key_giveaway_submit() - == default_response - )
Rollout plan for critical projects promo

The following steps should be followed to roll out the critical projects promo:

### Launch

- [x] Merge #10856
  - At this point, no projects will be considered critical, and the promo will be unavailable
  - https://pypi.org/security-key-giveaway/ will 404
  - Overall, no functional changes will be enabled at this point
- [x] Merge https://github.com/pypa/warehouse/pull/11626 which emits relevant metrics
  - Number of projects marked critical
  - Number of projects manually requiring 2FA
  - Number of critical project maintainers
  - Number of critical project maintainers with 2FA enabled
  - Number of users with 2FA enabled
- [x] Load discount codes into the `code` column of the `user_titan_codes` table
- [x] Set `TWOFACTORREQUIREMENT_ENABLED` to `True`.
  - This enables the opt-in 2FA requirement feature
- [x] Set `TWOFACTORMANDATE_AVAILABLE` to `True`.
  - This enables the job which flips the `pypi_mandates_2fa` bit to True and emails maintainers.
  - However, the cohort size will be zero, so this has the effect of 'soft-launching' to only our own dependencies
  - This also makes https://pypi.org/security-key-giveaway/ viewable (see the sketch after this plan)
  - At this point, we should make sure everything is working OK and codes are redeemable before moving on
- [x] Set `TWOFACTORMANDATE_COHORTSIZE` to a non-zero value
  - We are targeting the top 1% of projects, so this should be around `3800`.
- [x] Tweets/announcements about the promo can go out at this time
  - These should include details about the giveaway, which projects are considered critical and why, as well as new features which allow projects to opt in to their own 2FA mandate

### Post-launch (After Oct 1, 2022)

- [x] After some period of time, one of two things happens:
  1. We run out of codes, and https://pypi.org/security-key-giveaway/ indicates as much
  2. We hit the expiry of the codes
  - Either way, we merge a PR (#11625) which removes copy about the promo and updates https://pypi.org/security-key-giveaway/ to indicate that the promo has ended.

Next steps are in https://github.com/pypi/warehouse/issues/12308.
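For reference, a minimal sketch of the feature gate behind `TWOFACTORMANDATE_AVAILABLE`: the setting presumably surfaces as the `warehouse.two_factor_mandate.available` registry key checked by the giveaway view. The view body below is condensed from the unchanged code in the `warehouse/views.py` diff above; the environment-variable-to-registry-key mapping and the missing route/renderer configuration are assumptions, not a definitive implementation.

```python
from pyramid.httpexceptions import HTTPNotFound
from pyramid.view import view_config


class SecurityKeyGiveaway:
    def __init__(self, request):
        self.request = request

    @property
    def default_response(self):
        return {}

    @view_config(request_method="GET")
    def security_key_giveaway(self):
        # Until the mandate is flagged as available, the page 404s,
        # which is what makes the first launch step a functional no-op.
        if not self.request.registry.settings.get(
            "warehouse.two_factor_mandate.available"
        ):
            raise HTTPNotFound

        return self.default_response
```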
There's now a public dashboard for the relevant metrics here: https://p.datadoghq.com/sb/7dc8b3250-389f47d638b967dbb8f7edfd4c46acb1 (h/t @ewdurbin for beautifying this).

Heads up, you sent out an email with `http://localhost` in the URLs instead of `https://pypi.org`. This happens in Flask when you don't configure it to know where it is when a request isn't active, such as when generating emails; it's probably similar in Pyramid.

Thanks for the report, we're working on it 🙂

> Titan keys are only approved for sale in certain geographic regions, and thus can only be shipped to the following countries: Austria, Belgium, Canada, France, Germany, Italy, Japan, Spain, Switzerland, United Kingdom, and the United States.

Since when are Germany, Italy, France, etc. part of a different regulatory regime than the rest of the EU?

@tomato42 Unfortunately this is out of our control; these are the only countries in which Google is able to sell the product, and I don't have an explanation as to why.

I received the ``[PyPI] A project you maintain has been designated as critical`` email, but it would be helpful to know the criteria for that designation. Number of downloads? Number of GitHub stars? Number of other projects that have my project as a dependency? A combination of the above?

It's also a bit inconsistent. Jinja2 didn't get marked as critical, even though it's the most downloaded of my projects. Flask didn't get marked, but the less used Quart did.

Never mind, I think it's currently limited to some libraries that Warehouse uses, although I'm not sure where Quart came from.

> Never mind, I think it's currently limited to some libraries that Warehouse uses, although I'm not sure where Quart came from.

https://pypi.org/project/semgrep/ got marked as critical and it doesn't seem to be used by Warehouse (yet!)

> I received the [PyPI] A project you maintain has been designated as critical email, but it would be helpful to know the criteria for that designation. Number of downloads? Number of GitHub stars? Number of other projects that have my project as a dependency? A combination of the above?

Answers to this and many more questions are included at https://pypi.org/security-key-giveaway/

> It's also a bit inconsistent. Jinja2 didn't get marked as critical, even though it's the most downloaded of my projects. Flask didn't get marked, but the less used Quart did.

This does surprise me; I wonder if we have an issue with name normalization happening.

> Never mind, I think it's currently limited to some libraries that Warehouse uses, although I'm not sure where Quart came from.

We expanded it to the top 1% by downloads. The query is here: https://github.com/pypi/warehouse/blob/714babdf83fe3414974a14e1accdae1527cf7473/warehouse/packaging/tasks.py#L42-L57

Yes, BigQuery stores the names normalized IIRC, and that query is using ``Project.name``, not ``Project.normalized_name``.

Yeah, we've only flipped the bit for 3381 projects; this should be >3800. Will address this.

@davidism https://github.com/pypi/warehouse/pull/11796 should fix this, and the bit should get flipped for these projects in ~8 hours.

I am a bit confused by how / what projects are getting marked as critical as well. One of my projects (https://pypi.org/project/boa-str/) got marked as "critical", but it is an old, very small and simple string manipulation library, last released in 2017. It was basically a small internal dependency made external for convenience. Nowhere near the level of a project like Flask or Jinja...
I would be surprised if it had any external users at all, let alone met these criteria from the page linked above:

> **What determines if project is a critical project?**
>
> PyPI determines project eligibility based on download counts derived from PyPI's [public dataset of download statistics](https://warehouse.pypa.io/api-reference/bigquery-datasets.html). Any project in the top 1% of downloads over the prior 6 months is designated as critical.

I tried to access the public BigQuery dataset to run a simple query (below) but got denied running the first query due to a free-tier quota error.

```SQL
SELECT COUNT(1)
FROM `bigquery-public-data.pypi.file_downloads`
WHERE project = "boa-str"
  AND timestamp >= "2022-01-01"
GROUP BY `project`
LIMIT 10;
```

The error:

```
Quota exceeded: Your project exceeded quota for free query bytes scanned. For more information, see https://cloud.google.com/bigquery/docs/troubleshoot-quotas
```

There is a small possibility that it's still in use as a dependency and, e.g., being pulled into some Docker containers running at scale, given that it was written for a startup which has grown massively.

Are there other ways to access this data, e.g., a JSON export of the 3800 projects, to check whether this is a mistake?

@tedmiston https://pepy.tech/project/boa-str or https://pypistats.org/packages/boa-str are both good ways to view this data. Looks like it gets quite a bit of downloads.

@alex Thank you! It turns out xkcd was right after all. *updates resume*

![](https://imgs.xkcd.com/comics/dependency.png)

> Are there other ways to access this data, e.g., a JSON export of the 3800 projects, to check whether this is a mistake?

I expect the top ~3,800 projects (over 6 months) will be somewhat similar to those on the monthly list at https://hugovk.github.io/top-pypi-packages/

@davidism Jinja2 is number 36 (75 million monthly downloads) so should be included, and likewise Pillow at 60 (43m). Pillow also isn't currently marked as critical, but both have a capital initial, so I expect the normalisation fix will sort that out in a few hours 👍

@tedmiston boa-str is at number 1,340 with 878k downloads!

There still seem to be some problems with the query. For example, sqlalchemy is not marked as critical even though it's both a top 1% project and used by Warehouse.

The query hasn't re-run yet; it runs once a day.

Pillow is now marked as critical, and there's the bump from 3.38k to 3.82k critical projects on the [dashboard](https://p.datadoghq.com/sb/7dc8b3250-389f47d638b967dbb8f7edfd4c46acb1?from_ts=1657270574591&to_ts=1657356974591&live=true):

<img width="711" alt="image" src="https://user-images.githubusercontent.com/1324225/178100251-0927bf54-0b5a-46d5-b6d2-4a74d3d287dc.png">

Thanks! One note from a UX perspective: I enabled 2FA via app preemptively, ahead of getting the hardware key. But as soon as one does this, the page at https://pypi.org/security-key-giveaway/ decides you're not eligible for the hardware key. It was trivial to remove it, request the hardware key, and re-enable it, but it would be nice if 3800 of us didn't have to do that 🙃.

Edit: Never mind about getting the order through... it looks like Google is sold out of both keys in the U.S. now. [The USB-C key says in stock on the product page, but out of stock once added to cart. The USB-A key says out of stock on the product page.]

I am a maintainer of 16 projects marked as critical, but I am still not eligible to get a hardware key because I did the right thing and adopted (software-based) 2FA previously. That is hilarious.
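As an aside, pypistats.org also exposes a JSON API, so the same download check can be scripted without touching BigQuery quotas. A minimal sketch, assuming the `/api/packages/<package>/recent` endpoint and the `requests` library (the endpoint and response shape are taken from the pypistats API documentation, not from this thread):

```python
import requests

# Recent download counts for a single package, e.g. boa-str.
resp = requests.get("https://pypistats.org/api/packages/boa-str/recent")
resp.raise_for_status()
data = resp.json()["data"]
print(data["last_day"], data["last_week"], data["last_month"])
```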
![](https://sbarnea.com/ss/Screen-Shot-2022-07-11-12-34-23.99.png)

I am not sure if @tedmiston's trick still works, but I can see how this program could easily have the opposite of the desired effect.

Our goal is to get as many people as possible to use 2FA. Our constraint is that we have a limited number of hardware keys to give away. While I agree that hardware keys should be preferred over TOTP, if you already have 2FA enabled via TOTP but take a pair of free keys, that potentially means that one less person can enable 2FA.

That said, the discount codes expire Oct 1. If it looks like we'll have a surplus of discount codes by then, I'd support adjusting this policy to allow TOTP users to acquire hardware keys as well.

If someone has never used hardware keys, I would recommend the software approach, as it is very easy to stick the TOTP secret into your preferred password manager or just use one app like Google Authenticator. Using a HW token is considerably more inconvenient. Forcing 2FA is a no-brainer, and I would support even more aggressive rollout methods (1% is quite low). I think that those that fight it are very few and are in the category that do not give a (dime) about security for users, as in the end nobody is excluded from being hacked. A big thank you to all those that made the 1% group!

@hugovk, when you got the email for Pillow, did it have HTTP or HTTPS links? I believe it should have been HTTPS, and https://github.com/pypi/warehouse/issues/11802 is just a side effect of us running the task via CLI instead of via cron.

It had HTTP. The first email was for projects with lowercase names, the second was for Pillow:

<img width="1181" alt="image" src="https://user-images.githubusercontent.com/1324225/178285564-56e26a28-fa6e-44f1-8785-a3764eb3283b.png">

Hi, we have also been designated as a critical project. We have been automatically deploying/publishing releases to PyPI directly from our CI (running in the cloud), fully automated. It is not clear, and I cannot find, how to achieve this; both the physical key and the authenticator apps seem to work only for manual publishing. Am I missing something? Many thanks!

API keys can be used to accomplish this: https://pypi.org/help/#apitoken

> API keys can be used to accomplish this: https://pypi.org/help/#apitoken

This is what we were already using, and it started failing today, so we assumed it was the 2FA being enabled. We have also tried manually enabling 2FA, and it is still failing with ``"Backend is unhealthy"``. It might be some temporary issue; we will try again tomorrow and report back. Thanks!

"Backend is unhealthy" means the CDN is having trouble talking to the application servers. https://status.python.org/ shows some spikes in error metrics, not sure if that's related. In any event, it's unrelated to 2FA requirements :-)

Hm, perhaps not the right place for this, but would it be useful to display a "critical package" badge on the PyPI page, or make a badge for it to add to the repo if desired? At the moment it mostly feels like another hurdle to jump through when we perform the release dance that happens somewhere deep in dev/maintainer land. I see how it may benefit security in general, but as far as I understand, the main reason for this promo is to show that PyPI is taking security seriously so that users (and downstream packages) can trust their dependencies a bit more. Would be nice to have something to show for that.

@FirefoxMetzger I am very much in favor of starting to add badges, but it is not so simple.
For example, I still find "critical" misleading because, in fact, what was used to determine it was download traffic over the last 6 months. I would say that "critical" should relate more to how many other projects are using a package, something that PyPI cannot yet determine.

For example, I would support making the "Sole Owner" badge public, as, as far as I am concerned, that is a security and maintenance risk too, as it means "only one person can publish". That person might go rogue at some point, or just become permanently unavailable. For me that might be a very good reason for marking a package as risky/problematic in a public way. In fact, the lack of use of bot accounts with tokens for uploading packages is another red flag, but that is currently close to impossible for PyPI to determine. Still, let's open a discussion thread, as this issue is not the right place to discuss these.

AFAIK, nobody should ever publish packages using personal credentials. The only exception is when you bootstrap a new project, so you reserve the namespace, but even this can be done with tokens.

> Still, let's open a discussion thread, as this issue is not the right place to discuss these.

@ssbarnea Sure, feel free to ping me and I'm happy to chime in.

> I would support making the "Sole Owner" badge public, as, as far as I am concerned, that is a security and maintenance risk [...] marking a package as risky/problematic in a public way [...] the lack of use of bot accounts with tokens for uploading packages is another red flag [...] nobody should ever publish packages using personal credentials

Those are all very valid points from a security perspective, and I agree that those are concerns to keep in mind. At the same time, I doubt that many maintainers are "sole owners" because they want to be, but rather because they haven't yet found others to join them in maintaining the package. In my (perhaps limited) experience, this change usually happens through increased adoption of the package, because you'll eventually run into motivated individuals who volunteer to help out.

I'm not entirely convinced that more pressure on sole maintainers (in the form of a "this repo is risky to use because there is only one person maintaining it" badge) will help improve the situation. Instead, I was thinking that a "your project is a critical piece of infra, keep up the good work" badge doesn't cost much, shows appreciation for people spending their free time on this, might encourage sole maintainers to adhere to best practice (you want to live up to the expectation others have of you), and will at worst do nothing. It's also complementary to any crackdown actions on packages that could be maintained better (e.g., enforced 2FA), so I figured I could at least suggest it :)
2022-10-03T18:58:07Z
[]
[]
pypi/warehouse
12,343
pypi__warehouse-12343
[ "12335" ]
bfb17e4158d2223e67f694d1618c6dd3b2191976
diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py --- a/warehouse/search/queries.py +++ b/warehouse/search/queries.py @@ -52,36 +52,45 @@ def get_es_query(es, terms, order, classifiers): """ Returns an Elasticsearch query from data from the request. """ + classifier_q = Q( + "bool", + # The results must have all selected classifiers + must=[ + Q( + "bool", + should=[ + # Term search for the exact classifier + Q("term", classifiers=classifier), + # Prefix search for potential children classifiers + Q("prefix", classifiers=classifier + " :: "), + ], + ) + for classifier in classifiers + ], + ) if not terms: - query = es.query() + query = es.query(classifier_q) if classifiers else es.query() else: - bool_query = gather_es_queries(terms) + quoted_string, unquoted_string = filter_query(terms) + bool_query = Q( + "bool", + must=[form_query("phrase", i) for i in quoted_string] + + [form_query("best_fields", i) for i in unquoted_string] + + ([classifier_q] if classifiers else []), + ) + + # Allow to optionally match on prefix + # if ``q`` is longer than one character. + if len(terms) > 1: + bool_query = bool_query | Q("prefix", normalized_name=terms) + query = es.query(bool_query) query = query.suggest("name_suggestion", terms, term={"field": "name"}) - # Require match to all specified classifiers - for classifier in classifiers: - query = query.query("prefix", classifiers=classifier) - query = query_for_order(query, order) return query -def gather_es_queries(q): - quoted_string, unquoted_string = filter_query(q) - must = [form_query("phrase", i) for i in quoted_string] + [ - form_query("best_fields", i) for i in unquoted_string - ] - - bool_query = Q("bool", must=must) - - # Allow to optionally match on prefix - # if ``q`` is longer than one character. - if len(q) > 1: - bool_query = bool_query | Q("prefix", normalized_name=q) - return bool_query - - def filter_query(s): """ Filters given query with the below regex
diff --git a/tests/unit/test_search.py b/tests/unit/test_search.py --- a/tests/unit/test_search.py +++ b/tests/unit/test_search.py @@ -39,7 +39,7 @@ def test_no_terms(self): query = queries.get_es_query(es, "", "", []) - assert query == es.query() + assert query.to_dict() == {"query": {"match_all": {}}} @pytest.mark.parametrize( "terms,expected_prefix,expected_type", @@ -54,22 +54,31 @@ def test_quoted_query(self, terms, expected_prefix, expected_type): query = queries.get_es_query(es, terms, "", []) - query_dict = query.to_dict() - assert len(query_dict["query"]["bool"]["should"]) == 2 - assert query_dict["query"]["bool"]["should"][1] == { - "prefix": {"normalized_name": expected_prefix} - } - must_params = query_dict["query"]["bool"]["should"][0]["bool"]["must"] - assert len(must_params) == 1 - assert must_params[0]["multi_match"] == { - "fields": EXPECTED_SEARCH_FIELDS, - "type": expected_type, - "query": "foo bar" if terms != '"a"' else "a", - } - assert query_dict["suggest"] == { - "name_suggestion": {"text": terms, "term": {"field": "name"}} + assert query.to_dict() == { + "query": { + "bool": { + "should": [ + { + "bool": { + "must": [ + { + "multi_match": { + "fields": EXPECTED_SEARCH_FIELDS, + "query": "foo bar" + if terms != '"a"' + else "a", + "type": expected_type, + } + }, + ] + } + }, + {"prefix": {"normalized_name": expected_prefix}}, + ] + } + }, + "suggest": {"name_suggestion": {"text": terms, "term": {"field": "name"}}}, } - assert "sort" not in query_dict # default "relevance" mode does no sorting def test_single_not_quoted_character(self): es = Search() @@ -77,18 +86,22 @@ def test_single_not_quoted_character(self): query = queries.get_es_query(es, terms, "", []) - query_dict = query.to_dict() - must_params = query_dict["query"]["bool"]["must"] - assert len(must_params) == 1 - assert must_params[0]["multi_match"] == { - "fields": EXPECTED_SEARCH_FIELDS, - "type": "best_fields", - "query": "a", - } - assert query_dict["suggest"] == { - "name_suggestion": {"text": terms, "term": {"field": "name"}} + assert query.to_dict() == { + "query": { + "bool": { + "must": [ + { + "multi_match": { + "fields": EXPECTED_SEARCH_FIELDS, + "query": "a", + "type": "best_fields", + } + }, + ] + } + }, + "suggest": {"name_suggestion": {"text": "a", "term": {"field": "name"}}}, } - assert "sort" not in query_dict # default "relevance" mode does no sorting def test_mixed_quoted_query(self): es = Search() @@ -96,27 +109,38 @@ def test_mixed_quoted_query(self): query = queries.get_es_query(es, terms, "", []) - query_dict = query.to_dict() - assert len(query_dict["query"]["bool"]["should"]) == 2 - assert query_dict["query"]["bool"]["should"][1] == { - "prefix": {"normalized_name": '"foo bar" baz'} - } - must_params = query_dict["query"]["bool"]["should"][0]["bool"]["must"] - assert len(must_params) == 2 - assert must_params[0]["multi_match"] == { - "fields": EXPECTED_SEARCH_FIELDS, - "type": "phrase", - "query": "foo bar", - } - assert must_params[1]["multi_match"] == { - "fields": EXPECTED_SEARCH_FIELDS, - "type": "best_fields", - "query": "baz", - } - assert query_dict["suggest"] == { - "name_suggestion": {"text": terms, "term": {"field": "name"}} + assert query.to_dict() == { + "query": { + "bool": { + "should": [ + { + "bool": { + "must": [ + { + "multi_match": { + "fields": EXPECTED_SEARCH_FIELDS, + "query": "foo bar", + "type": "phrase", + } + }, + { + "multi_match": { + "fields": EXPECTED_SEARCH_FIELDS, + "query": "baz", + "type": "best_fields", + } + }, + ] + } + }, + {"prefix": 
{"normalized_name": '"foo bar" baz'}}, + ] + } + }, + "suggest": { + "name_suggestion": {"text": '"foo bar" baz', "term": {"field": "name"}} + }, } - assert "sort" not in query_dict # default "relevance" mode does no sorting @pytest.mark.parametrize( "order,field", [("created", "created"), ("-zscore", "zscore")] @@ -127,79 +151,139 @@ def test_sort_order(self, order, field): query = queries.get_es_query(es, terms, order, []) - query_dict = query.to_dict() - assert len(query_dict["query"]["bool"]["should"]) == 2 - assert query_dict["query"]["bool"]["should"][1] == { - "prefix": {"normalized_name": "foo bar"} - } - must_params = query_dict["query"]["bool"]["should"][0]["bool"]["must"] - assert len(must_params) == 1 - assert must_params[0]["multi_match"] == { - "fields": EXPECTED_SEARCH_FIELDS, - "type": "best_fields", - "query": "foo bar", - } - assert query_dict["suggest"] == { - "name_suggestion": {"text": terms, "term": {"field": "name"}} - } - assert query_dict["sort"] == [ - { - field: { - "order": "desc" if order.startswith("-") else "asc", - "unmapped_type": "long", + assert query.to_dict() == { + "query": { + "bool": { + "should": [ + { + "bool": { + "must": [ + { + "multi_match": { + "fields": EXPECTED_SEARCH_FIELDS, + "query": terms, + "type": "best_fields", + } + }, + ] + } + }, + {"prefix": {"normalized_name": terms}}, + ] } - } - ] + }, + "suggest": {"name_suggestion": {"text": terms, "term": {"field": "name"}}}, + "sort": [ + { + field: { + "order": "desc" if order.startswith("-") else "asc", + "unmapped_type": "long", + } + } + ], + } def test_with_classifiers_with_terms(self): es = Search() terms = "foo bar" - classifiers = [("c", "foo :: bar"), ("c", "fiz :: buz")] + classifiers = ["foo :: bar", "fiz :: buz"] query = queries.get_es_query(es, terms, "", classifiers) - query_dict = query.to_dict() - assert len(query_dict["query"]["bool"]["should"]) == 2 - assert query_dict["query"]["bool"]["should"][1] == { - "prefix": {"normalized_name": "foo bar"} - } - must_params = query_dict["query"]["bool"]["should"][0]["bool"]["must"] - assert len(must_params) == 1 - assert must_params[0]["multi_match"] == { - "fields": EXPECTED_SEARCH_FIELDS, - "type": "best_fields", - "query": "foo bar", - } - assert query_dict["suggest"] == { - "name_suggestion": {"text": terms, "term": {"field": "name"}} + assert query.to_dict() == { + "query": { + "bool": { + "should": [ + { + "bool": { + "must": [ + { + "multi_match": { + "fields": EXPECTED_SEARCH_FIELDS, + "query": terms, + "type": "best_fields", + } + }, + { + "bool": { + "must": [ + { + "bool": { + "should": [ + { + "term": { + "classifiers": classifier # noqa + } + }, + { + "prefix": { + "classifiers": classifier # noqa + + " :: " + } + }, + ] + } + } + for classifier in classifiers + ] + } + }, + ] + } + }, + {"prefix": {"normalized_name": terms}}, + ] + } + }, + "suggest": {"name_suggestion": {"text": terms, "term": {"field": "name"}}}, } - assert "sort" not in query_dict - assert query_dict["query"]["bool"]["must"] == [ - {"prefix": {"classifiers": classifier}} for classifier in classifiers - ] - assert query_dict["query"]["bool"]["minimum_should_match"] == 1 def test_with_classifiers_with_no_terms(self): es = Search() terms = "" - classifiers = [("c", "foo :: bar"), ("c", "fiz :: buz")] + classifiers = ["foo :: bar", "fiz :: buz"] query = queries.get_es_query(es, terms, "", classifiers) - query_dict = query.to_dict() - assert query_dict["query"]["bool"]["must"] == [ - {"prefix": {"classifiers": classifier}} for classifier in classifiers - 
] + assert query.to_dict() == { + "query": { + "bool": { + "must": [ + { + "bool": { + "should": [ + {"term": {"classifiers": classifier}}, + {"prefix": {"classifiers": classifier + " :: "}}, + ] + } + } + for classifier in classifiers + ] + } + } + } def test_with_classifier_with_no_terms_and_order(self): es = Search() terms = "" - classifiers = [("c", "foo :: bar")] + classifiers = ["foo :: bar"] query = queries.get_es_query(es, terms, "-created", classifiers) - query_dict = query.to_dict() - assert query_dict["query"] == {"prefix": {"classifiers": ("c", "foo :: bar")}} - assert query_dict["sort"] == [ - {"created": {"order": "desc", "unmapped_type": "long"}} - ] + assert query.to_dict() == { + "query": { + "bool": { + "must": [ + { + "bool": { + "should": [ + {"term": {"classifiers": "foo :: bar"}}, + {"prefix": {"classifiers": "foo :: bar :: "}}, + ] + } + } + ] + } + }, + "sort": [{"created": {"order": "desc", "unmapped_type": "long"}}], + }
Python 3.1 classifier filtering is broken

When [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier: any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.

[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1
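The patch above fixes this by replacing the bare `prefix` query with an exact `term` match plus a `prefix` match anchored on the `" :: "` path separator, so `3.1` matches itself and its children but not siblings like `3.10`. A condensed sketch of that query shape with `elasticsearch_dsl` (the classifier value is an example):

```python
from elasticsearch_dsl import Q

classifier = "Programming Language :: Python :: 3.1"

classifier_q = Q(
    "bool",
    should=[
        # Exact hit on the selected classifier...
        Q("term", classifiers=classifier),
        # ...or any child classifier; the trailing " :: " keeps
        # "3.1" from matching siblings such as "3.10" or "3.11".
        Q("prefix", classifiers=classifier + " :: "),
    ],
)
```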
2022-10-11T05:13:31Z
[]
[]
pypi/warehouse
12,350
pypi__warehouse-12350
[ "11642" ]
89d523b67d1333e0c2adbcc73b698e709fa5046d
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -1160,6 +1160,14 @@ def user_organizations(request): ) .subquery() ) + organizations_with_sole_owner = ( + request.db.query(OrganizationRole.organization_id) + .join(organizations_owned) + .filter(OrganizationRole.role_name == "Owner") + .group_by(OrganizationRole.organization_id) + .having(func.count(OrganizationRole.organization_id) == 1) + .subquery() + ) return { "organizations_owned": ( request.db.query(Organization) @@ -1179,6 +1187,15 @@ def user_organizations(request): .order_by(Organization.name) .all() ), + "organizations_with_sole_owner": ( + request.db.query(Organization) + .join( + organizations_with_sole_owner, + Organization.id == organizations_with_sole_owner.c.organization_id, + ) + .order_by(Organization.name) + .all() + ), } @@ -2061,6 +2078,12 @@ def manage_organization_roles( "roles": roles, "invitations": invitations, "form": form, + "organizations_with_sole_owner": list( + organization.name + for organization in user_organizations(request)[ + "organizations_with_sole_owner" + ] + ), } @@ -2235,6 +2258,12 @@ def delete_organization_role(organization, request): organization_service = request.find_service(IOrganizationService, context=None) role_id = request.POST["role_id"] role = organization_service.get_organization_role(role_id) + organizations_sole_owned = set( + organization.id + for organization in user_organizations(request)["organizations_with_sole_owner"] + ) + is_sole_owner = organization.id in organizations_sole_owned + if not role or role.organization_id != organization.id: request.session.flash("Could not find member", queue="error") elif ( @@ -2243,8 +2272,12 @@ def delete_organization_role(organization, request): request.session.flash( "Cannot remove other people from the organization", queue="error" ) - elif role.role_name == OrganizationRoleType.Owner and role.user == request.user: - request.session.flash("Cannot remove yourself as Owner", queue="error") + elif ( + role.role_name == OrganizationRoleType.Owner + and role.user == request.user + and is_sole_owner + ): + request.session.flash("Cannot remove yourself as Sole Owner", queue="error") else: organization_service.delete_organization_role(role.id) organization.record_event( @@ -4423,9 +4456,13 @@ def delete_project_role(project, request): .filter(Role.id == request.POST["role_id"]) .one() ) + projects_sole_owned = set( + project.name for project in user_projects(request)["projects_sole_owned"] + ) removing_self = role.role_name == "Owner" and role.user == request.user - if removing_self: - request.session.flash("Cannot remove yourself as Owner", queue="error") + is_sole_owner = project.name in projects_sole_owned + if removing_self and is_sole_owner: + request.session.flash("Cannot remove yourself as Sole Owner", queue="error") else: request.db.delete(role) request.db.add( @@ -4463,6 +4500,8 @@ def delete_project_role(project, request): ) request.session.flash("Removed role", queue="success") + if removing_self: + return HTTPSeeOther(request.route_path("manage.projects")) except NoResultFound: request.session.flash("Could not find role", queue="error")
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -4031,7 +4031,9 @@ def test_add_organization_project_new_project_name_conflict( class TestManageOrganizationRoles: - def test_get_manage_organization_roles(self, db_request, enable_organizations): + def test_get_manage_organization_roles( + self, db_request, pyramid_user, enable_organizations + ): organization = OrganizationFactory.create(name="foobar") form_obj = pretend.stub() @@ -4046,6 +4048,7 @@ def form_class(*a, **kw): "roles": set(), "invitations": set(), "form": form_obj, + "organizations_with_sole_owner": [], } @freeze_time(datetime.datetime.utcnow()) @@ -4810,10 +4813,20 @@ def test_delete_role(self, db_request, enable_organizations, monkeypatch): assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect" - def test_delete_missing_role(self, db_request, enable_organizations): + def test_delete_missing_role(self, db_request, enable_organizations, monkeypatch): organization = OrganizationFactory.create(name="foobar") missing_role_id = str(uuid.uuid4()) + user_organizations = pretend.call_recorder( + lambda *a, **kw: { + "organizations_managed": [], + "organizations_owned": [organization], + "organizations_billing": [], + "organizations_with_sole_owner": [], + } + ) + monkeypatch.setattr(views, "user_organizations", user_organizations) + db_request.method = "POST" db_request.user = pretend.stub() db_request.POST = MultiDict({"role_id": missing_role_id}) @@ -4880,7 +4893,7 @@ def test_delete_own_owner_role(self, db_request, enable_organizations): result = views.delete_organization_role(organization, db_request) assert db_request.session.flash.calls == [ - pretend.call("Cannot remove yourself as Owner", queue="error") + pretend.call("Cannot remove yourself as Sole Owner", queue="error") ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect" @@ -8641,11 +8654,66 @@ def test_delete_own_owner_role(self, db_request): result = views.delete_project_role(project, db_request) assert db_request.session.flash.calls == [ - pretend.call("Cannot remove yourself as Owner", queue="error") + pretend.call("Cannot remove yourself as Sole Owner", queue="error") ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect" + def test_delete_not_sole_owner_role(self, db_request, monkeypatch): + project = ProjectFactory.create(name="foobar") + user = UserFactory.create() + RoleFactory.create(user=user, project=project, role_name="Owner") + user_2 = UserFactory.create(username="testuser") + role_2 = RoleFactory.create(user=user_2, project=project, role_name="Owner") + + db_request.method = "POST" + db_request.user = user_2 + db_request.POST = MultiDict({"role_id": role_2.id}) + db_request.session = pretend.stub( + flash=pretend.call_recorder(lambda *a, **kw: None) + ) + db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect") + + send_collaborator_removed_email = pretend.call_recorder(lambda *a, **kw: None) + monkeypatch.setattr( + views, "send_collaborator_removed_email", send_collaborator_removed_email + ) + send_removed_as_collaborator_email = pretend.call_recorder( + lambda *a, **kw: None + ) + monkeypatch.setattr( + views, + "send_removed_as_collaborator_email", + send_removed_as_collaborator_email, + ) + + result = views.delete_project_role(project, db_request) + + assert db_request.route_path.calls == 
[pretend.call("manage.projects")] + assert db_request.db.query(Role).filter(Role.user_id == user_2.id).all() == [] + assert send_collaborator_removed_email.calls == [ + pretend.call( + db_request, {user}, user=user_2, submitter=user_2, project_name="foobar" + ) + ] + assert send_removed_as_collaborator_email.calls == [ + pretend.call(db_request, user_2, submitter=user_2, project_name="foobar") + ] + assert db_request.session.flash.calls == [ + pretend.call("Removed role", queue="success") + ] + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/the-redirect" + + entry = ( + db_request.db.query(JournalEntry).options(joinedload("submitted_by")).one() + ) + + assert entry.name == project.name + assert entry.action == "remove Owner testuser" + assert entry.submitted_by == db_request.user + assert entry.submitted_from == db_request.remote_addr + def test_delete_non_owner_role(self, db_request): project = ProjectFactory.create(name="foobar") user = UserFactory.create(username="testuser")
User who created the organization cannot leave the organization

Once a user has created an organization, they cannot leave the organization. To replicate this issue:

1. Click on 'Your organizations'
2. Click on 'Manage'
3. Scroll to the bottom of the page where the members of the organization are listed

The first owner cannot leave the organization.

![org1](https://user-images.githubusercontent.com/37237726/175095225-bf5c4cba-47ac-4346-8399-647a55066daa.png)
Priority 1/3. Need to enforce that there is another owner.
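The patch enforces this by computing the set of organizations in which the `Owner` role appears exactly once; a trimmed sketch of that query, lifted from the diff above (`request` and the `organizations_owned` subquery come from the surrounding view code and are not defined here):

```python
from sqlalchemy import func

from warehouse.organizations.models import OrganizationRole

# Organizations the current user owns in which there is exactly one
# Owner role; removing that owner would leave the org ownerless.
organizations_with_sole_owner = (
    request.db.query(OrganizationRole.organization_id)
    .join(organizations_owned)
    .filter(OrganizationRole.role_name == "Owner")
    .group_by(OrganizationRole.organization_id)
    .having(func.count(OrganizationRole.organization_id) == 1)
    .subquery()
)
```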
2022-10-12T16:08:43Z
[]
[]
pypi/warehouse
12,351
pypi__warehouse-12351
[ "7119" ]
3bd01a17adc90efc6335c1adea1e0b4066b0e61b
diff --git a/warehouse/accounts/forms.py b/warehouse/accounts/forms.py --- a/warehouse/accounts/forms.py +++ b/warehouse/accounts/forms.py @@ -34,6 +34,7 @@ send_password_compromised_email_hibp, send_recovery_code_used_email, ) +from warehouse.events.tags import EventTag from warehouse.i18n import localize as _ from warehouse.utils.otp import TOTP_LENGTH @@ -343,7 +344,7 @@ def validate_totp_value(self, field): if not self.user_service.check_totp_value(self.user_id, totp_value): self.user_service.record_event( self.user_id, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": "invalid_totp"}, ) raise wtforms.validators.ValidationError(_("Invalid TOTP code.")) @@ -378,7 +379,7 @@ def validate_credential(self, field): except webauthn.AuthenticationRejectedError as e: self.user_service.record_event( self.user_id, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": "invalid_webauthn"}, ) raise wtforms.validators.ValidationError(str(e)) @@ -418,14 +419,14 @@ def validate_recovery_code_value(self, field): except (InvalidRecoveryCode, NoRecoveryCodes): self.user_service.record_event( self.user_id, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": "invalid_recovery_code"}, ) raise wtforms.validators.ValidationError(_("Invalid recovery code.")) except BurnedRecoveryCode: self.user_service.record_event( self.user_id, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": "burned_recovery_code"}, ) raise wtforms.validators.ValidationError( diff --git a/warehouse/accounts/security_policy.py b/warehouse/accounts/security_policy.py --- a/warehouse/accounts/security_policy.py +++ b/warehouse/accounts/security_policy.py @@ -31,6 +31,7 @@ BasicAuthFailedPassword, WarehouseDenied, ) +from warehouse.events.tags import EventTag from warehouse.packaging.models import TwoFactorRequireable from warehouse.utils.security_policy import AuthenticationMethod @@ -93,7 +94,7 @@ def _basic_auth_check(username, password, request): return True else: user.record_event( - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, ip_address=request.remote_addr, additional={"reason": "invalid_password", "auth_method": "basic"}, ) diff --git a/warehouse/accounts/views.py b/warehouse/accounts/views.py --- a/warehouse/accounts/views.py +++ b/warehouse/accounts/views.py @@ -66,6 +66,7 @@ send_password_reset_email, send_recovery_code_reminder_email, ) +from warehouse.events.tags import EventTag from warehouse.organizations.interfaces import IOrganizationService from warehouse.organizations.models import OrganizationRole, OrganizationRoleType from warehouse.packaging.models import ( @@ -440,7 +441,7 @@ def recovery_code(request, _form_class=RecoveryCodeAuthenticationForm): user_service.record_event( userid, - tag="account:recovery_codes:used", + tag=EventTag.Account.RecoveryCodesUsed, ) request.session.flash( @@ -553,7 +554,7 @@ def register(request, _form_class=RegistrationForm): email = user_service.add_email(user.id, form.email.data, primary=True) user_service.record_event( user.id, - tag="account:create", + tag=EventTag.Account.AccountCreate, additional={"email": form.email.data}, ) @@ -600,7 +601,7 @@ def request_password_reset(request, _form_class=RequestPasswordResetForm): send_password_reset_email(request, (user, email)) user_service.record_event( user.id, - tag="account:password:reset:request", + tag=EventTag.Account.PasswordResetRequest, ) 
user_service.ratelimiters["password.reset"].hit(user.id) @@ -610,7 +611,7 @@ def request_password_reset(request, _form_class=RequestPasswordResetForm): else: user_service.record_event( user.id, - tag="account:password:reset:attempt", + tag=EventTag.Account.PasswordResetAttempt, ) request.session.flash( request._( @@ -713,7 +714,7 @@ def _error(message): ) # Update password. user_service.update_user(user.id, password=form.new_password.data) - user_service.record_event(user.id, tag="account:password:reset") + user_service.record_event(user.id, tag=EventTag.Account.PasswordReset) password_reset_limiter.clear(user.id) # Send password change email @@ -775,7 +776,7 @@ def _error(message): email.unverify_reason = None email.transient_bounces = 0 email.user.record_event( - tag="account:email:verified", + tag=EventTag.Account.EmailVerified, ip_address=request.remote_addr, additional={"email": email.email, "primary": email.primary}, ) @@ -883,7 +884,7 @@ def _error(message): submitter_user = user_service.get_user(data.get("submitter_id")) message = request.params.get("message", "") organization.record_event( - tag="organization:organization_role:declined", + tag=EventTag.Organization.OrganizationRoleDeclineInvite, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(submitter_user.id), @@ -892,7 +893,7 @@ def _error(message): }, ) user.record_event( - tag="account:organization_role:declined", + tag=EventTag.Account.OrganizationRoleDeclineInvite, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(submitter_user.id), @@ -936,7 +937,7 @@ def _error(message): organization_service.delete_organization_invite(organization_invite.id) submitter_user = user_service.get_user(data.get("submitter_id")) organization.record_event( - tag="organization:organization_role:accepted", + tag=EventTag.Organization.OrganizationRoleAdd, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(submitter_user.id), @@ -945,7 +946,7 @@ def _error(message): }, ) user.record_event( - tag="account:organization_role:accepted", + tag=EventTag.Account.OrganizationRoleAdd, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(submitter_user.id), @@ -1055,6 +1056,25 @@ def _error(message): } elif request.method == "POST" and "decline" in request.POST: request.db.delete(role_invite) + submitter_user = user_service.get_user(data.get("submitter_id")) + project.record_event( + tag=EventTag.Project.RoleDeclineInvite, + ip_address=request.remote_addr, + additional={ + "submitted_by": submitter_user.username, + "role_name": desired_role, + "target_user": user.username, + }, + ) + user.record_event( + tag=EventTag.Account.RoleDeclineInvite, + ip_address=request.remote_addr, + additional={ + "submitted_by": submitter_user.username, + "project_name": project.name, + "role_name": desired_role, + }, + ) request.session.flash( request._( "Invitation for '${project_name}' is declined.", @@ -1075,7 +1095,7 @@ def _error(message): ) ) project.record_event( - tag="project:role:accepted", + tag=EventTag.Project.RoleAdd, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -1084,7 +1104,7 @@ def _error(message): }, ) user.record_event( - tag="account:role:accepted", + tag=EventTag.Account.RoleAdd, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -1179,7 +1199,7 @@ def _login_user(request, userid, two_factor_method=None, two_factor_label=None): user_service.update_user(userid, 
last_login=datetime.datetime.utcnow()) user_service.record_event( userid, - tag="account:login:success", + tag=EventTag.Account.LoginSuccess, additional={ "two_factor_method": two_factor_method, "two_factor_label": two_factor_label, diff --git a/warehouse/admin/views/organizations.py b/warehouse/admin/views/organizations.py --- a/warehouse/admin/views/organizations.py +++ b/warehouse/admin/views/organizations.py @@ -25,6 +25,7 @@ send_new_organization_approved_email, send_new_organization_declined_email, ) +from warehouse.events.tags import EventTag from warehouse.organizations.interfaces import IOrganizationService from warehouse.organizations.models import Organization from warehouse.utils.paginate import paginate_url_factory @@ -156,7 +157,9 @@ def organization_detail(request): raise HTTPNotFound create_event = ( - organization.events.filter(Organization.Event.tag == "organization:create") + organization.events.filter( + Organization.Event.tag == EventTag.Organization.OrganizationCreate + ) .order_by(Organization.Event.time.desc()) .first() ) @@ -164,14 +167,18 @@ def organization_detail(request): if organization.is_approved is True: approve_event = ( - organization.events.filter(Organization.Event.tag == "organization:approve") + organization.events.filter( + Organization.Event.tag == EventTag.Organization.OrganizationApprove + ) .order_by(Organization.Event.time.desc()) .first() ) admin = user_service.get_user(approve_event.additional["approved_by_user_id"]) elif organization.is_approved is False: decline_event = ( - organization.events.filter(Organization.Event.tag == "organization:decline") + organization.events.filter( + Organization.Event.tag == EventTag.Organization.OrganizationDecline + ) .order_by(Organization.Event.time.desc()) .first() ) @@ -216,7 +223,9 @@ def organization_approve(request): ) create_event = ( - organization.events.filter(Organization.Event.tag == "organization:create") + organization.events.filter( + Organization.Event.tag == EventTag.Organization.OrganizationCreate + ) .order_by(Organization.Event.time.desc()) .first() ) @@ -227,7 +236,7 @@ def organization_approve(request): organization_service.approve_organization(organization.id) organization_service.record_event( organization.id, - tag="organization:approve", + tag=EventTag.Organization.OrganizationApprove, additional={"approved_by_user_id": str(request.user.id)}, ) send_admin_new_organization_approved_email( @@ -282,7 +291,9 @@ def organization_decline(request): ) create_event = ( - organization.events.filter(Organization.Event.tag == "organization:create") + organization.events.filter( + Organization.Event.tag == EventTag.Organization.OrganizationCreate + ) .order_by(Organization.Event.time.desc()) .first() ) @@ -293,7 +304,7 @@ def organization_decline(request): organization_service.decline_organization(organization.id) organization_service.record_event( organization.id, - tag="organization:decline", + tag=EventTag.Organization.OrganizationDecline, additional={"declined_by_user_id": str(request.user.id)}, ) send_admin_new_organization_declined_email( diff --git a/warehouse/config.py b/warehouse/config.py --- a/warehouse/config.py +++ b/warehouse/config.py @@ -439,6 +439,7 @@ def configure(settings=None): # And some enums to reuse in the templates jglobals.setdefault("AdminFlagValue", "warehouse.admin.flags:AdminFlagValue") + jglobals.setdefault("EventTag", "warehouse.events.tags:EventTag") jglobals.setdefault( "OrganizationInvitationStatus", 
"warehouse.organizations.models:OrganizationInvitationStatus", diff --git a/warehouse/events/__init__.py b/warehouse/events/__init__.py new file mode 100644 --- /dev/null +++ b/warehouse/events/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/warehouse/events/tags.py b/warehouse/events/tags.py new file mode 100644 --- /dev/null +++ b/warehouse/events/tags.py @@ -0,0 +1,165 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum + + +class EventTagEnum(str, enum.Enum): + """Base class for Enum representing Event tags. + + Tags can be broken into three colon-separated parts: + 1. source type + 2. subject type + 3. action + + For example, for event tag "project:role:add": + 1. "project" is the source type + 2. "role" is the subject type + 3. "add" is the action + + In some cases, the subject type can contain a colon: + + For example, for event tag "project:release:file:remove": + 1. "project" is the source type + 2. "release:file" is the subject type + 3. "remove" is the action + + If omitted, subject type is implied to be the same as source type. + + For example, for event tag "project:create": + 1. "project" is the source type + 2. "project" is also the subject type + 3. 
"create" is the action + + """ + + source_type: str + subject_type: str + action: str + + # Name = "source_type:subject_type:action" + def __new__(cls, value: str): + values = value.split(":") + obj = str.__new__(cls, value) + obj._value_ = value + obj.source_type = values[0] + obj.subject_type = ":".join(values[1:-1]) or value[0] + obj.action = values[-1] + return obj + + +class EventTag: + class Account(EventTagEnum): + # Name = "source_type:subject_type:action" + APITokenAdded = "account:api_token:added" + APITokenRemoved = "account:api_token:removed" + APITokenRemovedLeak = "account:api_token:removed_leak" + AccountCreate = "account:create" + EmailAdd = "account:email:add" + EmailPrimaryChange = "account:email:primary:change" + EmailRemove = "account:email:remove" + EmailReverify = "account:email:reverify" + EmailVerified = "account:email:verified" + LoginFailure = "account:login:failure" + LoginSuccess = "account:login:success" + OrganizationRoleAdd = "account:organization_role:add" + OrganizationRoleChange = "account:organization_role:change" + OrganizationRoleDeclineInvite = "account:organization_role:decline_invite" + OrganizationRoleInvite = "account:organization_role:invite" + OrganizationRoleRemove = "account:organization_role:remove" + OrganizationRoleRevokeInvite = "account:organization_role:revoke_invite" + PasswordChange = "account:password:change" + PasswordReset = "account:password:reset" + PasswordResetAttempt = "account:password:reset:attempt" + PasswordResetRequest = "account:password:reset:request" + RecoveryCodesGenerated = "account:recovery_codes:generated" + RecoveryCodesRegenerated = "account:recovery_codes:regenerated" + RecoveryCodesUsed = "account:recovery_codes:used" + RoleAdd = "account:role:add" + RoleChange = "account:role:change" + RoleDeclineInvite = "account:role:decline_invite" + RoleInvite = "account:role:invite" + RoleRemove = "account:role:remove" + RoleRevokeInvite = "account:role:revoke_invite" + TeamRoleAdd = "account:team_role:add" + TeamRoleRemove = "account:team_role:remove" + TwoFactorMethodAdded = "account:two_factor:method_added" + TwoFactorMethodRemoved = "account:two_factor:method_removed" + # The following tags are no longer used when recording events. 
+ # EmailSent = "account:email:sent" + # ReauthenticateFailure = "account:reauthenticate:failure" + # RoleAccepted = "account:role:accepted" + + class Project(EventTagEnum): + # Name = "source_type:subject_type:action" + APITokenAdded = "project:api_token:added" + APITokenRemoved = "project:api_token:removed" + OIDCProviderAdded = "project:oidc:provider-added" + OIDCProviderRemoved = "project:oidc:provider-removed" + OrganizationProjectAdd = "project:organization_project:add" + OrganizationProjectRemove = "project:organization_project:remove" + OwnersRequire2FADisabled = "project:owners_require_2fa:disabled" + OwnersRequire2FAEnabled = "project:owners_require_2fa:enabled" + ProjectCreate = "project:create" + ReleaseAdd = "project:release:add" + ReleaseFileRemove = "project:release:file:remove" + ReleaseRemove = "project:release:remove" + ReleaseUnyank = "project:release:unyank" + ReleaseYank = "project:release:yank" + RoleAdd = "project:role:add" + RoleChange = "project:role:change" + RoleDeclineInvite = "project:role:decline_invite" + RoleInvite = "project:role:invite" + RoleRemove = "project:role:remove" + RoleRevokeInvite = "project:role:revoke_invite" + TeamProjectRoleAdd = "project:team_project_role:add" + TeamProjectRoleChange = "project:team_project_role:change" + TeamProjectRoleRemove = "project:team_project_role:remove" + # The following tags are no longer used when recording events. + # RoleAccepted = "project:role:accepted" + # RoleDelete = "project:role:delete" + + class Organization(EventTagEnum): + # Name = "source_type:subject_type:action" + CatalogEntryAdd = "organization:catalog_entry:add" + OrganizationApprove = "organization:approve" + OrganizationCreate = "organization:create" + OrganizationDecline = "organization:decline" + OrganizationDelete = "organization:delete" + OrganizationRename = "organization:rename" + OrganizationProjectAdd = "organization:organization_project:add" + OrganizationProjectRemove = "organization:organization_project:remove" + OrganizationRoleAdd = "organization:organization_role:add" + OrganizationRoleChange = "organization:organization_role:change" + OrganizationRoleDeclineInvite = "organization:organization_role:decline_invite" + OrganizationRoleInvite = "organization:organization_role:invite" + OrganizationRoleRemove = "organization:organization_role:remove" + OrganizationRoleRevokeInvite = "organization:organization_role:revoke_invite" + TeamCreate = "organization:team:create" + TeamDelete = "organization:team:delete" + TeamRename = "organization:team:rename" + TeamProjectRoleAdd = "organization:team_project_role:add" + TeamProjectRoleChange = "organization:team_project_role:change" + TeamProjectRoleRemove = "organization:team_project_role:remove" + TeamRoleAdd = "organization:team_role:add" + TeamRoleRemove = "organization:team_role:remove" + + class Team(EventTagEnum): + # Name = "source_type:subject_type:action" + TeamCreate = "team:create" + TeamDelete = "team:delete" + TeamRename = "team:rename" + TeamProjectRoleAdd = "team:team_project_role:add" + TeamProjectRoleChange = "team:team_project_role:change" + TeamProjectRoleRemove = "team:team_project_role:remove" + TeamRoleAdd = "team:team_role:add" + TeamRoleRemove = "team:team_role:remove" diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -47,6 +47,7 @@ from warehouse.admin.flags import AdminFlagValue from warehouse.classifiers.models import Classifier from warehouse.email import 
send_basic_auth_with_two_factor_email +from warehouse.events.tags import EventTag from warehouse.metrics import IMetricsService from warehouse.packaging.interfaces import IFileStorage from warehouse.packaging.models import ( @@ -912,7 +913,7 @@ def file_upload(request): ) ) project.record_event( - tag="project:role:add", + tag=EventTag.Project.RoleAdd, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -1088,7 +1089,7 @@ def file_upload(request): ) project.record_event( - tag="project:release:add", + tag=EventTag.Project.ReleaseAdd, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, diff --git a/warehouse/integrations/github/utils.py b/warehouse/integrations/github/utils.py --- a/warehouse/integrations/github/utils.py +++ b/warehouse/integrations/github/utils.py @@ -21,6 +21,7 @@ from warehouse import integrations from warehouse.accounts.interfaces import IUserService from warehouse.email import send_token_compromised_email_leak +from warehouse.events.tags import EventTag from warehouse.macaroons import InvalidMacaroonError from warehouse.macaroons.interfaces import IMacaroonService from warehouse.metrics import IMetricsService @@ -250,7 +251,7 @@ def _analyze_disclosure(request, disclosure_record, origin): user_service.record_event( database_macaroon.user.id, - tag="account:api_token:removed_leak", + tag=EventTag.Account.APITokenRemovedLeak, additional={ "macaroon_id": str(database_macaroon.id), "public_url": disclosure.public_url, diff --git a/warehouse/malware/checks/package_turnover/check.py b/warehouse/malware/checks/package_turnover/check.py --- a/warehouse/malware/checks/package_turnover/check.py +++ b/warehouse/malware/checks/package_turnover/check.py @@ -16,6 +16,7 @@ from sqlalchemy import select from warehouse.accounts.models import User +from warehouse.events.tags import EventTag from warehouse.malware.checks.base import MalwareCheckBase from warehouse.malware.models import ( MalwareVerdict, @@ -48,7 +49,7 @@ def user_posture_verdicts(self, project): self.db.query(User.Event) .filter(User.Event.source_id == user.id) .filter(User.Event.time >= self._scan_interval) - .filter(User.Event.tag == "account:two_factor:method_removed") + .filter(User.Event.tag == EventTag.Account.TwoFactorMethodRemoved) .exists() ).scalar() @@ -69,7 +70,10 @@ def user_turnover_verdicts(self, project): self.db.query(Project.Event.additional) .filter(Project.Event.source_id == project.id) .filter(Project.Event.time >= self._scan_interval) - .filter(Project.Event.tag == "project:role:add") + .filter( + (Project.Event.tag == EventTag.Project.RoleAdd) + | (Project.Event.tag == "project:role:accepted") + ) .all() ) diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -95,6 +95,7 @@ send_unyanked_project_release_email, send_yanked_project_release_email, ) +from warehouse.events.tags import EventTag from warehouse.forklift.legacy import MAX_FILESIZE, MAX_PROJECT_SIZE from warehouse.macaroons import caveats from warehouse.macaroons.interfaces import IMacaroonService @@ -357,7 +358,7 @@ def add_email(self): email = self.user_service.add_email(self.request.user.id, form.email.data) self.user_service.record_event( self.request.user.id, - tag="account:email:add", + tag=EventTag.Account.EmailAdd, additional={"email": email.email}, ) @@ -400,7 +401,7 @@ def delete_email(self): self.request.user.emails.remove(email) self.user_service.record_event( self.request.user.id, - 
tag="account:email:remove", + tag=EventTag.Account.EmailRemove, additional={"email": email.email}, ) self.request.session.flash( @@ -436,7 +437,7 @@ def change_primary_email(self): new_primary_email.primary = True self.user_service.record_event( self.request.user.id, - tag="account:email:primary:change", + tag=EventTag.Account.EmailPrimaryChange, additional={ "old_primary": previous_primary_email.email if previous_primary_email @@ -481,7 +482,7 @@ def reverify_email(self): send_email_verification_email(self.request, (self.request.user, email)) verify_email_ratelimit.hit(self.request.user.id) email.user.record_event( - tag="account:email:reverify", + tag=EventTag.Account.EmailReverify, ip_address=self.request.remote_addr, additional={"email": email.email}, ) @@ -520,7 +521,7 @@ def change_password(self): ) self.user_service.record_event( self.request.user.id, - tag="account:password:change", + tag=EventTag.Account.PasswordChange, ) send_password_change_email(self.request, self.request.user) self.request.db.flush() # Ensure changes are persisted to DB @@ -698,7 +699,7 @@ def validate_totp_provision(self): self.request.session.clear_totp_secret() self.user_service.record_event( self.request.user.id, - tag="account:two_factor:method_added", + tag=EventTag.Account.TwoFactorMethodAdded, additional={"method": "totp"}, ) self.request.session.flash( @@ -736,7 +737,7 @@ def delete_totp(self): self.user_service.update_user(self.request.user.id, totp_secret=None) self.user_service.record_event( self.request.user.id, - tag="account:two_factor:method_removed", + tag=EventTag.Account.TwoFactorMethodRemoved, additional={"method": "totp"}, ) self.request.session.flash( @@ -823,7 +824,7 @@ def validate_webauthn_provision(self): ) self.user_service.record_event( self.request.user.id, - tag="account:two_factor:method_added", + tag=EventTag.Account.TwoFactorMethodAdded, additional={"method": "webauthn", "label": form.label.data}, ) self.request.session.flash( @@ -863,7 +864,7 @@ def delete_webauthn(self): self.request.user.webauthn.remove(form.webauthn) self.user_service.record_event( self.request.user.id, - tag="account:two_factor:method_removed", + tag=EventTag.Account.TwoFactorMethodRemoved, additional={"method": "webauthn", "label": form.label.data}, ) self.request.session.flash("Security device removed", queue="success") @@ -909,7 +910,7 @@ def recovery_codes_generate(self): send_recovery_codes_generated_email(self.request, self.request.user) self.user_service.record_event( self.request.user.id, - tag="account:recovery_codes:generated", + tag=EventTag.Account.RecoveryCodesGenerated, ) return {"recovery_codes": recovery_codes} @@ -925,7 +926,7 @@ def recovery_codes_regenerate(self): send_recovery_codes_generated_email(self.request, self.request.user) self.user_service.record_event( self.request.user.id, - tag="account:recovery_codes:regenerated", + tag=EventTag.Account.RecoveryCodesRegenerated, ) return {"recovery_codes": recovery_codes} @@ -1049,7 +1050,7 @@ def create_macaroon(self): ) self.user_service.record_event( self.request.user.id, - tag="account:api_token:added", + tag=EventTag.Account.APITokenAdded, additional={ "description": form.description.data, "caveats": recorded_caveats, @@ -1067,7 +1068,7 @@ def create_macaroon(self): # have access to projects that this project's owner # isn't aware of. 
project.record_event( - tag="project:api_token:added", + tag=EventTag.Project.APITokenAdded, ip_address=self.request.remote_addr, additional={ "description": form.description.data, @@ -1100,7 +1101,7 @@ def delete_macaroon(self): self.macaroon_service.delete_macaroon(form.macaroon_id.data) self.user_service.record_event( self.request.user.id, - tag="account:api_token:removed", + tag=EventTag.Account.APITokenRemoved, additional={"macaroon_id": form.macaroon_id.data}, ) if "projects" in macaroon.permissions_caveat: @@ -1112,7 +1113,7 @@ def delete_macaroon(self): ] for project in projects: project.record_event( - tag="project:api_token:removed", + tag=EventTag.Project.APITokenRemoved, ip_address=self.request.remote_addr, additional={ "description": macaroon.description, @@ -1330,13 +1331,13 @@ def create_organization(self): organization = self.organization_service.add_organization(**data) self.organization_service.record_event( organization.id, - tag="organization:create", + tag=EventTag.Organization.OrganizationCreate, additional={"created_by_user_id": str(self.request.user.id)}, ) self.organization_service.add_catalog_entry(organization.id) self.organization_service.record_event( organization.id, - tag="organization:catalog_entry:add", + tag=EventTag.Organization.CatalogEntryAdd, additional={"submitted_by_user_id": str(self.request.user.id)}, ) self.organization_service.add_organization_role( @@ -1346,16 +1347,7 @@ def create_organization(self): ) self.organization_service.record_event( organization.id, - tag="organization:organization_role:invite", - additional={ - "submitted_by_user_id": str(self.request.user.id), - "role_name": "Owner", - "target_user_id": str(self.request.user.id), - }, - ) - self.organization_service.record_event( - organization.id, - tag="organization:organization_role:accepted", + tag=EventTag.Organization.OrganizationRoleAdd, additional={ "submitted_by_user_id": str(self.request.user.id), "role_name": "Owner", @@ -1364,7 +1356,7 @@ def create_organization(self): ) self.user_service.record_event( self.request.user.id, - tag="account:organization_role:accepted", + tag=EventTag.Account.OrganizationRoleAdd, additional={ "submitted_by_user_id": str(self.request.user.id), "organization_name": organization.name, @@ -1488,7 +1480,7 @@ def save_organization_name(self): form.name.data, ) self.organization.record_event( - tag="organization:rename", + tag=EventTag.Organization.OrganizationRename, ip_address=self.request.remote_addr, additional={ "previous_organization_name": previous_organization_name, @@ -1536,7 +1528,7 @@ def delete_organization(self): # Record event before deleting organization. self.organization.record_event( - tag="organization:delete", + tag=EventTag.Organization.OrganizationDelete, ip_address=self.request.remote_addr, additional={ "deleted_by_user_id": str(self.request.user.id), @@ -1736,7 +1728,7 @@ def create_team(self): # Record events. 
self.organization.record_event( - tag="organization:team:create", + tag=EventTag.Organization.TeamCreate, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -1744,7 +1736,7 @@ def create_team(self): }, ) team.record_event( - tag="team:create", + tag=EventTag.Team.TeamCreate, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -1870,7 +1862,7 @@ def add_organization_project(self): ) ) project.record_event( - tag="project:role:delete", + tag=EventTag.Project.RoleRemove, ip_address=self.request.remote_addr, additional={ "submitted_by": self.request.user.username, @@ -1878,6 +1870,15 @@ def add_organization_project(self): "target_user": role.user.username, }, ) + role.user.record_event( + tag=EventTag.Account.RoleRemove, + ip_address=self.request.remote_addr, + additional={ + "submitted_by": self.request.user.username, + "project_name": project.name, + "role_name": role.role_name, + }, + ) else: # Validate new project name. try: @@ -1896,7 +1897,7 @@ def add_organization_project(self): # Record events. self.organization.record_event( - tag="organization:organization_project:add", + tag=EventTag.Organization.OrganizationProjectAdd, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -1904,7 +1905,7 @@ def add_organization_project(self): }, ) project.record_event( - tag="project:organization_project:add", + tag=EventTag.Project.OrganizationProjectAdd, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -2031,7 +2032,7 @@ def manage_organization_roles( invite_token=invite_token, ) organization.record_event( - tag="organization:organization_role:invite", + tag=EventTag.Organization.OrganizationRoleInvite, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -2039,6 +2040,15 @@ def manage_organization_roles( "target_user_id": str(userid), }, ) + user.record_event( + tag=EventTag.Account.OrganizationRoleInvite, + ip_address=request.remote_addr, + additional={ + "submitted_by_user_id": str(request.user.id), + "organization_name": organization.name, + "role_name": role_name.value, + }, + ) request.db.flush() # in order to get id owner_users = set(organization_owners(request, organization)) send_organization_member_invited_email( @@ -2131,7 +2141,7 @@ def revoke_organization_invitation(organization, request): role_name = token_data.get("desired_role") organization.record_event( - tag="organization:organization_role:revoke_invite", + tag=EventTag.Organization.OrganizationRoleRevokeInvite, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -2139,6 +2149,15 @@ def revoke_organization_invitation(organization, request): "target_user_id": str(user.id), }, ) + user.record_event( + tag=EventTag.Account.OrganizationRoleRevokeInvite, + ip_address=request.remote_addr, + additional={ + "submitted_by_user_id": str(request.user.id), + "organization_name": organization.name, + "role_name": role_name, + }, + ) owner_users = set(organization_owners(request, organization)) send_organization_member_invite_canceled_email( @@ -2217,7 +2236,7 @@ def change_organization_role( ) organization.record_event( - tag="organization:organization_role:change", + tag=EventTag.Organization.OrganizationRoleChange, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -2226,7 +2245,7 @@ def 
change_organization_role( }, ) role.user.record_event( - tag="account:organization_role:change", + tag=EventTag.Account.OrganizationRoleChange, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -2281,7 +2300,7 @@ def delete_organization_role(organization, request): else: organization_service.delete_organization_role(role.id) organization.record_event( - tag="organization:organization_role:delete", + tag=EventTag.Organization.OrganizationRoleRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -2290,7 +2309,7 @@ def delete_organization_role(organization, request): }, ) role.user.record_event( - tag="account:organization_role:delete", + tag=EventTag.Account.OrganizationRoleRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -2381,7 +2400,26 @@ def save_team(self): if form.validate(): name = form.name.data + previous_team_name = self.team.name self.organization_service.rename_team(self.team.id, name) + self.team.organization.record_event( + tag=EventTag.Organization.TeamRename, + ip_address=self.request.remote_addr, + additional={ + "renamed_by_user_id": str(self.request.user.id), + "team_name": self.team.name, + "previous_team_name": previous_team_name, + }, + ) + self.team.record_event( + tag=EventTag.Team.TeamRename, + ip_address=self.request.remote_addr, + additional={ + "renamed_by_user_id": str(self.request.user.id), + "team_name": self.team.name, + "previous_team_name": previous_team_name, + }, + ) self.request.session.flash("Team name updated", queue="success") return HTTPSeeOther( self.request.route_path( @@ -2404,7 +2442,7 @@ def delete_team(self): # Record events. organization.record_event( - tag="organization:team:delete", + tag=EventTag.Organization.TeamDelete, ip_address=self.request.remote_addr, additional={ "deleted_by_user_id": str(self.request.user.id), @@ -2412,7 +2450,7 @@ def delete_team(self): }, ) self.team.record_event( - tag="team:delete", + tag=EventTag.Team.TeamDelete, ip_address=self.request.remote_addr, additional={ "deleted_by_user_id": str(self.request.user.id), @@ -2558,7 +2596,7 @@ def create_team_role(self): # Record events. self.team.organization.record_event( - tag="organization:team_role:add", + tag=EventTag.Organization.TeamRoleAdd, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -2568,7 +2606,7 @@ def create_team_role(self): }, ) self.team.record_event( - tag="team:team_role:add", + tag=EventTag.Team.TeamRoleAdd, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -2577,7 +2615,7 @@ def create_team_role(self): }, ) role.user.record_event( - tag="account:team_role:add", + tag=EventTag.Account.TeamRoleAdd, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -2643,7 +2681,7 @@ def delete_team_role(self): # Record events. 
self.team.organization.record_event( - tag="organization:team_role:delete", + tag=EventTag.Organization.TeamRoleRemove, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -2653,7 +2691,7 @@ def delete_team_role(self): }, ) self.team.record_event( - tag="team:team_role:delete", + tag=EventTag.Team.TeamRoleRemove, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -2662,7 +2700,7 @@ def delete_team_role(self): }, ) role.user.record_event( - tag="account:team_role:delete", + tag=EventTag.Account.TeamRoleRemove, ip_address=self.request.remote_addr, additional={ "submitted_by_user_id": str(self.request.user.id), @@ -2828,7 +2866,7 @@ def toggle_2fa_requirement(self): elif self.project.owners_require_2fa: self.project.owners_require_2fa = False self.project.record_event( - tag="project:owners_require_2fa:disabled", + tag=EventTag.Project.OwnersRequire2FADisabled, ip_address=self.request.remote_addr, additional={"modified_by": self.request.user.username}, ) @@ -2839,7 +2877,7 @@ def toggle_2fa_requirement(self): else: self.project.owners_require_2fa = True self.project.record_event( - tag="project:owners_require_2fa:enabled", + tag=EventTag.Project.OwnersRequire2FAEnabled, ip_address=self.request.remote_addr, additional={"modified_by": self.request.user.username}, ) @@ -3016,7 +3054,7 @@ def add_github_oidc_provider(self): self.project.oidc_providers.append(provider) self.project.record_event( - tag="project:oidc:provider-added", + tag=EventTag.Project.OIDCProviderAdded, ip_address=self.request.remote_addr, additional={ "provider": provider.provider_name, @@ -3082,7 +3120,7 @@ def delete_oidc_provider(self): self.project.oidc_providers.remove(provider) self.project.record_event( - tag="project:oidc:provider-removed", + tag=EventTag.Project.OIDCProviderRemoved, ip_address=self.request.remote_addr, additional={ "provider": provider.provider_name, @@ -3151,7 +3189,7 @@ def remove_organization_project(project, request): if organization := project.organization: organization_service.delete_organization_project(organization.id, project.id) organization.record_event( - tag="organization:organization_project:remove", + tag=EventTag.Organization.OrganizationProjectRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -3159,7 +3197,7 @@ def remove_organization_project(project, request): }, ) project.record_event( - tag="project:organization_project:remove", + tag=EventTag.Project.OrganizationProjectRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -3286,7 +3324,7 @@ def transfer_organization_project(project, request): ) ) project.record_event( - tag="project:role:delete", + tag=EventTag.Project.RoleRemove, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -3300,7 +3338,7 @@ def transfer_organization_project(project, request): if organization := project.organization: organization_service.delete_organization_project(organization.id, project.id) organization.record_event( - tag="organization:organization_project:remove", + tag=EventTag.Organization.OrganizationProjectRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -3308,7 +3346,7 @@ def transfer_organization_project(project, request): }, ) project.record_event( - tag="project:organization_project:remove", + tag=EventTag.Project.OrganizationProjectRemove, 
ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -3331,7 +3369,7 @@ def transfer_organization_project(project, request): organization = organization_service.get_organization_by_name(form.organization.data) organization_service.add_organization_project(organization.id, project.id) organization.record_event( - tag="organization:organization_project:add", + tag=EventTag.Organization.OrganizationProjectAdd, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -3339,7 +3377,7 @@ def transfer_organization_project(project, request): }, ) project.record_event( - tag="project:organization_project:add", + tag=EventTag.Project.OrganizationProjectAdd, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -3592,7 +3630,7 @@ def yank_project_release(self): ) self.release.project.record_event( - tag="project:release:yank", + tag=EventTag.Project.ReleaseYank, ip_address=self.request.remote_addr, additional={ "submitted_by": self.request.user.username, @@ -3674,7 +3712,7 @@ def unyank_project_release(self): ) self.release.project.record_event( - tag="project:release:unyank", + tag=EventTag.Project.ReleaseUnyank, ip_address=self.request.remote_addr, additional={ "submitted_by": self.request.user.username, @@ -3771,7 +3809,7 @@ def delete_project_release(self): ) self.release.project.record_event( - tag="project:release:remove", + tag=EventTag.Project.ReleaseRemove, ip_address=self.request.remote_addr, additional={ "submitted_by": self.request.user.username, @@ -3862,7 +3900,7 @@ def _error(message): ) self.release.project.record_event( - tag="project:release:file:remove", + tag=EventTag.Project.ReleaseFileRemove, ip_address=self.request.remote_addr, additional={ "submitted_by": self.request.user.username, @@ -4019,7 +4057,7 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): # Record events. project.record_event( - tag="project:team_project_role:create", + tag=EventTag.Project.TeamProjectRoleAdd, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4028,7 +4066,7 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): }, ) team.organization.record_event( - tag="organization:team_project_role:create", + tag=EventTag.Organization.TeamProjectRoleAdd, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4038,7 +4076,7 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): }, ) team.record_event( - tag="team:team_project_role:create", + tag=EventTag.Team.TeamProjectRoleAdd, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4131,7 +4169,7 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): # Record events. 
project.record_event( - tag="project:role:create", + tag=EventTag.Project.RoleAdd, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -4140,7 +4178,7 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): }, ) user.record_event( - tag="account:role:create", + tag=EventTag.Account.RoleAdd, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -4264,7 +4302,7 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): token_age=token_service.max_age, ) project.record_event( - tag="project:role:invite", + tag=EventTag.Project.RoleInvite, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -4273,12 +4311,12 @@ def manage_project_roles(project, request, _form_class=CreateRoleForm): }, ) user.record_event( - tag="account:role:invite", + tag=EventTag.Account.RoleInvite, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, + "project_name": project.name, "role_name": role_name, - "target_user": username, }, ) request.db.flush() # in order to get id @@ -4342,7 +4380,7 @@ def revoke_project_role_invitation(project, request, _form_class=ChangeRoleForm) ) ) project.record_event( - tag="project:role:revoke_invite", + tag=EventTag.Project.RoleRevokeInvite, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -4350,6 +4388,15 @@ def revoke_project_role_invitation(project, request, _form_class=ChangeRoleForm) "target_user": user.username, }, ) + user.record_event( + tag=EventTag.Account.RoleRevokeInvite, + ip_address=request.remote_addr, + additional={ + "submitted_by": request.user.username, + "project_name": project.name, + "role_name": role_name, + }, + ) request.session.flash( request._( "Invitation revoked from '${username}'.", @@ -4399,7 +4446,7 @@ def change_project_role(project, request, _form_class=ChangeRoleForm): ) role.role_name = form.role_name.data project.record_event( - tag="project:role:change", + tag=EventTag.Project.RoleChange, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -4407,6 +4454,15 @@ def change_project_role(project, request, _form_class=ChangeRoleForm): "target_user": role.user.username, }, ) + role.user.record_event( + tag=EventTag.Account.RoleChange, + ip_address=request.remote_addr, + additional={ + "submitted_by": request.user.username, + "project_name": project.name, + "role_name": form.role_name.data, + }, + ) owner_users = set(project_owners(request, project)) # Don't send owner notification email to new user @@ -4474,7 +4530,7 @@ def delete_project_role(project, request): ) ) project.record_event( - tag="project:role:delete", + tag=EventTag.Project.RoleRemove, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -4562,7 +4618,7 @@ def change_team_project_role(project, request, _form_class=ChangeTeamProjectRole # Record events. 
project.record_event( - tag="project:team_project_role:change", + tag=EventTag.Project.TeamProjectRoleChange, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4571,7 +4627,7 @@ def change_team_project_role(project, request, _form_class=ChangeTeamProjectRole }, ) role.team.organization.record_event( - tag="organization:team_project_role:change", + tag=EventTag.Organization.TeamProjectRoleChange, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4581,7 +4637,7 @@ def change_team_project_role(project, request, _form_class=ChangeTeamProjectRole }, ) role.team.record_event( - tag="team:team_project_role:change", + tag=EventTag.Team.TeamProjectRoleChange, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4665,7 +4721,7 @@ def delete_team_project_role(project, request): # Record event. project.record_event( - tag="project:team_project_role:delete", + tag=EventTag.Project.TeamProjectRoleRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4674,7 +4730,7 @@ def delete_team_project_role(project, request): }, ) team.organization.record_event( - tag="organization:team_project_role:delete", + tag=EventTag.Organization.TeamProjectRoleRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), @@ -4684,7 +4740,7 @@ def delete_team_project_role(project, request): }, ) team.record_event( - tag="team:team_project_role:delete", + tag=EventTag.Team.TeamProjectRoleRemove, ip_address=request.remote_addr, additional={ "submitted_by_user_id": str(request.user.id), diff --git a/warehouse/organizations/tasks.py b/warehouse/organizations/tasks.py --- a/warehouse/organizations/tasks.py +++ b/warehouse/organizations/tasks.py @@ -14,6 +14,7 @@ from warehouse import tasks from warehouse.accounts.interfaces import ITokenService, TokenExpired +from warehouse.events.tags import EventTag from warehouse.organizations.interfaces import IOrganizationService from warehouse.organizations.models import ( Organization, @@ -61,7 +62,7 @@ def delete_declined_organizations(request): # TODO: Cannot call this after deletion so how exactly do we handle this? organization_service.record_event( organization.id, - tag="organization:delete", + tag=EventTag.Organization.OrganizationDelete, additional={"deleted_by": "CRON"}, ) organization_service.delete_organization(organization.id) diff --git a/warehouse/utils/project.py b/warehouse/utils/project.py --- a/warehouse/utils/project.py +++ b/warehouse/utils/project.py @@ -25,6 +25,7 @@ from sqlalchemy.orm.exc import NoResultFound from warehouse.admin.flags import AdminFlagValue +from warehouse.events.tags import EventTag from warehouse.packaging.interfaces import IDocsStorage from warehouse.packaging.models import JournalEntry, ProhibitedProjectName, Project from warehouse.tasks import task @@ -165,7 +166,7 @@ def add_project(name, request): ) ) project.record_event( - tag="project:create", + tag=EventTag.Project.ProjectCreate, ip_address=request.remote_addr, additional={"created_by": request.user.username}, )
diff --git a/tests/unit/accounts/test_core.py b/tests/unit/accounts/test_core.py --- a/tests/unit/accounts/test_core.py +++ b/tests/unit/accounts/test_core.py @@ -33,6 +33,7 @@ database_login_factory, ) from warehouse.errors import BasicAuthBreachedPassword, BasicAuthFailedPassword +from warehouse.events.tags import EventTag from warehouse.rate_limiting import IRateLimiter, RateLimit from ...common.db.accounts import UserFactory @@ -98,7 +99,7 @@ def test_with_invalid_password(self, pyramid_request, pyramid_services): ] assert user.record_event.calls == [ pretend.call( - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, ip_address="1.2.3.4", additional={"reason": "invalid_password", "auth_method": "basic"}, ) diff --git a/tests/unit/accounts/test_forms.py b/tests/unit/accounts/test_forms.py --- a/tests/unit/accounts/test_forms.py +++ b/tests/unit/accounts/test_forms.py @@ -24,6 +24,7 @@ TooManyFailedLogins, ) from warehouse.accounts.models import DisableReason +from warehouse.events.tags import EventTag from warehouse.utils.webauthn import AuthenticationRejectedError @@ -179,7 +180,7 @@ def test_validate_password_notok(self, db_session): assert user_service.record_event.calls == [ pretend.call( 1, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": "invalid_password"}, ) ] @@ -654,7 +655,7 @@ def test_totp_secret_exists(self, pyramid_config): assert user_service.record_event.calls == [ pretend.call( 1, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": "invalid_totp"}, ) ] @@ -743,7 +744,7 @@ def test_credential_invalid(self): assert user_service.record_event.calls == [ pretend.call( 1, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": "invalid_webauthn"}, ) ] @@ -841,7 +842,7 @@ def test_invalid_recovery_code( assert user_service.record_event.calls == [ pretend.call( 1, - tag="account:login:failure", + tag=EventTag.Account.LoginFailure, additional={"reason": expected_reason}, ) ] diff --git a/tests/unit/accounts/test_views.py b/tests/unit/accounts/test_views.py --- a/tests/unit/accounts/test_views.py +++ b/tests/unit/accounts/test_views.py @@ -41,6 +41,7 @@ from warehouse.accounts.models import User from warehouse.accounts.views import two_factor_and_totp_validate from warehouse.admin.flags import AdminFlag, AdminFlagValue +from warehouse.events.tags import EventTag from warehouse.organizations.models import ( OrganizationInvitation, OrganizationRole, @@ -273,7 +274,7 @@ def test_post_validate_redirects( assert user_service.record_event.calls == [ pretend.call( user_id, - tag="account:login:success", + tag=EventTag.Account.LoginSuccess, additional={"two_factor_method": None, "two_factor_label": None}, ) ] @@ -333,7 +334,7 @@ def test_post_validate_no_redirects( assert user_service.record_event.calls == [ pretend.call( 1, - tag="account:login:success", + tag=EventTag.Account.LoginSuccess, additional={"two_factor_method": None, "two_factor_label": None}, ) ] @@ -697,7 +698,7 @@ def test_totp_auth( assert user_service.record_event.calls == [ pretend.call( "1", - tag="account:login:success", + tag=EventTag.Account.LoginSuccess, additional={"two_factor_method": "totp", "two_factor_label": "totp"}, ) ] @@ -1141,7 +1142,7 @@ def test_recovery_code_auth(self, monkeypatch, pyramid_request, redirect_url): assert user_service.record_event.calls == [ pretend.call( "1", - tag="account:login:success", + tag=EventTag.Account.LoginSuccess, additional={ "two_factor_method": 
"recovery-code", "two_factor_label": None, @@ -1149,7 +1150,7 @@ def test_recovery_code_auth(self, monkeypatch, pyramid_request, redirect_url): ), pretend.call( "1", - tag="account:recovery_codes:used", + tag=EventTag.Account.RecoveryCodesUsed, ), ] assert pyramid_request.session.flash.calls == [ @@ -1384,12 +1385,12 @@ def test_register_redirect(self, db_request, monkeypatch): assert record_event.calls == [ pretend.call( user.id, - tag="account:create", + tag=EventTag.Account.AccountCreate, additional={"email": "[email protected]"}, ), pretend.call( user.id, - tag="account:login:success", + tag=EventTag.Account.LoginSuccess, additional={"two_factor_method": None, "two_factor_label": None}, ), ] @@ -1496,7 +1497,7 @@ def test_request_password_reset( assert user_service.record_event.calls == [ pretend.call( stub_user.id, - tag="account:password:reset:request", + tag=EventTag.Account.PasswordResetRequest, ) ] @@ -1561,7 +1562,7 @@ def test_request_password_reset_with_email( assert user_service.record_event.calls == [ pretend.call( stub_user.id, - tag="account:password:reset:request", + tag=EventTag.Account.PasswordResetRequest, ) ] assert user_service.ratelimiters["password.reset"].test.calls == [ @@ -1637,7 +1638,7 @@ def test_request_password_reset_with_non_primary_email( assert user_service.record_event.calls == [ pretend.call( stub_user.id, - tag="account:password:reset:request", + tag=EventTag.Account.PasswordResetRequest, ) ] assert user_service.ratelimiters["password.reset"].test.calls == [ @@ -1735,7 +1736,7 @@ def test_password_reset_prohibited( assert user_service.record_event.calls == [ pretend.call( stub_user.id, - tag="account:password:reset:attempt", + tag=EventTag.Account.PasswordResetAttempt, ) ] diff --git a/tests/unit/admin/views/test_organizations.py b/tests/unit/admin/views/test_organizations.py --- a/tests/unit/admin/views/test_organizations.py +++ b/tests/unit/admin/views/test_organizations.py @@ -17,6 +17,7 @@ from warehouse.accounts.interfaces import IUserService from warehouse.admin.views import organizations as views +from warehouse.events.tags import EventTag from warehouse.organizations.interfaces import IOrganizationService from ....common.db.organizations import OrganizationFactory @@ -495,7 +496,7 @@ def test_approve(self, enable_organizations, monkeypatch): assert organization_service.record_event.calls == [ pretend.call( organization.id, - tag="organization:approve", + tag=EventTag.Organization.OrganizationApprove, additional={"approved_by_user_id": str(admin.id)}, ), ] @@ -641,7 +642,7 @@ def test_decline(self, enable_organizations, monkeypatch): assert organization_service.record_event.calls == [ pretend.call( organization.id, - tag="organization:decline", + tag=EventTag.Organization.OrganizationDecline, additional={"declined_by_user_id": str(admin.id)}, ), ] diff --git a/tests/unit/integration/github/test_utils.py b/tests/unit/integration/github/test_utils.py --- a/tests/unit/integration/github/test_utils.py +++ b/tests/unit/integration/github/test_utils.py @@ -20,6 +20,7 @@ import requests from warehouse import integrations +from warehouse.events.tags import EventTag from warehouse.integrations.github import tasks, utils @@ -606,7 +607,7 @@ def metrics_increment(key): assert record_event.calls == [ pretend.call( user_id, - tag="account:api_token:removed_leak", + tag=EventTag.Account.APITokenRemovedLeak, additional={ "macaroon_id": "12", "public_url": "http://example.com", diff --git a/tests/unit/malware/checks/package_turnover/test_check.py 
b/tests/unit/malware/checks/package_turnover/test_check.py --- a/tests/unit/malware/checks/package_turnover/test_check.py +++ b/tests/unit/malware/checks/package_turnover/test_check.py @@ -12,6 +12,7 @@ import pretend +from warehouse.events.tags import EventTag from warehouse.malware.checks.package_turnover import check as c from warehouse.malware.models import ( MalwareCheckState, @@ -43,7 +44,7 @@ def test_user_posture_verdicts(db_session): check = c.PackageTurnoverCheck(db_session) user.record_event( - tag="account:two_factor:method_removed", ip_address="0.0.0.0", additional={} + tag=EventTag.Account.TwoFactorMethodRemoved, ip_address="0.0.0.0", additional={} ) check.user_posture_verdicts(project) @@ -81,7 +82,7 @@ def test_user_posture_verdicts_has_2fa(db_session): check = c.PackageTurnoverCheck(db_session) user.record_event( - tag="account:two_factor:method_removed", ip_address="0.0.0.0", additional={} + tag=EventTag.Account.TwoFactorMethodRemoved, ip_address="0.0.0.0", additional={} ) check.user_posture_verdicts(project) @@ -94,7 +95,7 @@ def test_user_turnover_verdicts(db_session): RoleFactory.create(user=user, project=project, role_name="Owner") project.record_event( - tag="project:role:add", + tag=EventTag.Project.RoleAdd, ip_address="0.0.0.0", additional={"target_user": user.username}, ) diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -40,6 +40,7 @@ TokenExpired, ) from warehouse.admin.flags import AdminFlagValue +from warehouse.events.tags import EventTag from warehouse.forklift.legacy import MAX_FILESIZE, MAX_PROJECT_SIZE from warehouse.macaroons import caveats from warehouse.macaroons.interfaces import IMacaroonService @@ -309,7 +310,7 @@ def test_add_email(self, monkeypatch, pyramid_request): assert user_service.record_event.calls == [ pretend.call( pyramid_request.user.id, - tag="account:email:add", + tag=EventTag.Account.EmailAdd, additional={"email": email_address}, ) ] @@ -380,7 +381,7 @@ def test_delete_email(self, monkeypatch): assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:email:remove", + tag=EventTag.Account.EmailRemove, additional={"email": email.email}, ) ] @@ -473,7 +474,7 @@ def test_change_primary_email(self, monkeypatch, db_request): assert user_service.record_event.calls == [ pretend.call( user.id, - tag="account:email:primary:change", + tag=EventTag.Account.EmailPrimaryChange, additional={"old_primary": "old", "new_primary": "new"}, ) ] @@ -509,7 +510,7 @@ def test_change_primary_email_without_current(self, monkeypatch, db_request): assert user_service.record_event.calls == [ pretend.call( user.id, - tag="account:email:primary:change", + tag=EventTag.Account.EmailPrimaryChange, additional={"old_primary": None, "new_primary": new_primary.email}, ) ] @@ -575,7 +576,7 @@ def test_reverify_email(self, monkeypatch): assert send_email.calls == [pretend.call(request, (request.user, email))] assert email.user.record_event.calls == [ pretend.call( - tag="account:email:reverify", + tag=EventTag.Account.EmailReverify, ip_address=request.remote_addr, additional={"email": email.email}, ) @@ -738,7 +739,7 @@ def test_change_password(self, monkeypatch): pretend.call(request.user.id, password=new_password) ] assert user_service.record_event.calls == [ - pretend.call(request.user.id, tag="account:password:change") + pretend.call(request.user.id, tag=EventTag.Account.PasswordChange) ] def 
test_change_password_validation_fails(self, monkeypatch): @@ -1142,7 +1143,7 @@ def test_validate_totp_provision(self, monkeypatch): assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:two_factor:method_added", + tag=EventTag.Account.TwoFactorMethodAdded, additional={"method": "totp"}, ) ] @@ -1299,7 +1300,7 @@ def test_delete_totp(self, monkeypatch, db_request): assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:two_factor:method_removed", + tag=EventTag.Account.TwoFactorMethodRemoved, additional={"method": "totp"}, ) ] @@ -1512,7 +1513,7 @@ def test_validate_webauthn_provision(self, monkeypatch): assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:two_factor:method_added", + tag=EventTag.Account.TwoFactorMethodAdded, additional={ "method": "webauthn", "label": provision_webauthn_obj.label.data, @@ -1605,7 +1606,7 @@ def test_delete_webauthn(self, monkeypatch): assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:two_factor:method_removed", + tag=EventTag.Account.TwoFactorMethodRemoved, additional={ "method": "webauthn", "label": delete_webauthn_obj.label.data, @@ -1691,7 +1692,7 @@ def test_recovery_codes_generate(self, monkeypatch): result = view.recovery_codes_generate() assert user_service.record_event.calls == [ - pretend.call(1, tag="account:recovery_codes:generated") + pretend.call(1, tag=EventTag.Account.RecoveryCodesGenerated) ] assert result == {"recovery_codes": ["aaaaaaaaaaaa", "bbbbbbbbbbbb"]} @@ -1758,7 +1759,7 @@ def test_recovery_codes_regenerate(self, monkeypatch): result = view.recovery_codes_regenerate() assert user_service.record_event.calls == [ - pretend.call(1, tag="account:recovery_codes:regenerated") + pretend.call(1, tag=EventTag.Account.RecoveryCodesRegenerated) ] assert result == {"recovery_codes": ["cccccccccccc", "dddddddddddd"]} @@ -2054,7 +2055,7 @@ def test_create_macaroon(self, monkeypatch): assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:api_token:added", + tag=EventTag.Account.APITokenAdded, additional={ "description": create_macaroon_obj.description.data, "caveats": [ @@ -2149,7 +2150,7 @@ def test_create_macaroon_records_events_for_each_project(self, monkeypatch): assert record_user_event.calls == [ pretend.call( request.user.id, - tag="account:api_token:added", + tag=EventTag.Account.APITokenAdded, additional={ "description": create_macaroon_obj.description.data, "caveats": [ @@ -2164,7 +2165,7 @@ def test_create_macaroon_records_events_for_each_project(self, monkeypatch): ] assert record_project_event.calls == [ pretend.call( - tag="project:api_token:added", + tag=EventTag.Project.APITokenAdded, ip_address=request.remote_addr, additional={ "description": create_macaroon_obj.description.data, @@ -2172,7 +2173,7 @@ def test_create_macaroon_records_events_for_each_project(self, monkeypatch): }, ), pretend.call( - tag="project:api_token:added", + tag=EventTag.Project.APITokenAdded, ip_address=request.remote_addr, additional={ "description": create_macaroon_obj.description.data, @@ -2296,7 +2297,7 @@ def test_delete_macaroon(self, monkeypatch): assert record_event.calls == [ pretend.call( request.user.id, - tag="account:api_token:removed", + tag=EventTag.Account.APITokenRemoved, additional={"macaroon_id": delete_macaroon_obj.macaroon_id.data}, ) ] @@ -2364,13 +2365,13 @@ def test_delete_macaroon_records_events_for_each_project(self, monkeypatch): assert 
record_user_event.calls == [ pretend.call( request.user.id, - tag="account:api_token:removed", + tag=EventTag.Account.APITokenRemoved, additional={"macaroon_id": delete_macaroon_obj.macaroon_id.data}, ) ] assert record_project_event.calls == [ pretend.call( - tag="project:api_token:removed", + tag=EventTag.Project.APITokenRemoved, ip_address=request.remote_addr, additional={ "description": "fake macaroon", @@ -2378,7 +2379,7 @@ def test_delete_macaroon_records_events_for_each_project(self, monkeypatch): }, ), pretend.call( - tag="project:api_token:removed", + tag=EventTag.Project.APITokenRemoved, ip_address=request.remote_addr, additional={ "description": "fake macaroon", @@ -2557,26 +2558,17 @@ def test_create_organization(self, enable_organizations, monkeypatch): assert organization_service.record_event.calls == [ pretend.call( organization.id, - tag="organization:create", + tag=EventTag.Organization.OrganizationCreate, additional={"created_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, - tag="organization:catalog_entry:add", + tag=EventTag.Organization.CatalogEntryAdd, additional={"submitted_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, - tag="organization:organization_role:invite", - additional={ - "submitted_by_user_id": str(request.user.id), - "role_name": "Owner", - "target_user_id": str(request.user.id), - }, - ), - pretend.call( - organization.id, - tag="organization:organization_role:accepted", + tag=EventTag.Organization.OrganizationRoleAdd, additional={ "submitted_by_user_id": str(request.user.id), "role_name": "Owner", @@ -2587,7 +2579,7 @@ def test_create_organization(self, enable_organizations, monkeypatch): assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:organization_role:accepted", + tag=EventTag.Account.OrganizationRoleAdd, additional={ "submitted_by_user_id": str(request.user.id), "organization_name": organization.name, @@ -2715,26 +2707,17 @@ def test_create_organization_with_subscription( assert organization_service.record_event.calls == [ pretend.call( organization.id, - tag="organization:create", + tag=EventTag.Organization.OrganizationCreate, additional={"created_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, - tag="organization:catalog_entry:add", + tag=EventTag.Organization.CatalogEntryAdd, additional={"submitted_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, - tag="organization:organization_role:invite", - additional={ - "submitted_by_user_id": str(request.user.id), - "role_name": "Owner", - "target_user_id": str(request.user.id), - }, - ), - pretend.call( - organization.id, - tag="organization:organization_role:accepted", + tag=EventTag.Organization.OrganizationRoleAdd, additional={ "submitted_by_user_id": str(request.user.id), "role_name": "Owner", @@ -2745,7 +2728,7 @@ def test_create_organization_with_subscription( assert user_service.record_event.calls == [ pretend.call( request.user.id, - tag="account:organization_role:accepted", + tag=EventTag.Account.OrganizationRoleAdd, additional={ "submitted_by_user_id": str(request.user.id), "organization_name": organization.name, @@ -4947,7 +4930,9 @@ def test_manage_team( "save_team_form": form, } - def test_save_team(self, db_request, organization_service, enable_organizations): + def test_save_team( + self, db_request, pyramid_user, organization_service, enable_organizations + ): team = TeamFactory.create(name="Team Name") db_request.POST = MultiDict({"name": "Team name"}) 
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foo/bar/") @@ -5788,13 +5773,13 @@ def test_toggle_2fa_requirement_critical( False, True, [pretend.call("2FA requirement enabled for foo", queue="success")], - "project:owners_require_2fa:enabled", + EventTag.Project.OwnersRequire2FAEnabled, ), ( True, False, [pretend.call("2FA requirement disabled for foo", queue="success")], - "project:owners_require_2fa:disabled", + EventTag.Project.OwnersRequire2FADisabled, ), ], ) @@ -7009,7 +6994,7 @@ def test_yank_project_release(self, monkeypatch): ] assert release.project.record_event.calls == [ pretend.call( - tag="project:release:yank", + tag=EventTag.Project.ReleaseYank, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -7178,7 +7163,7 @@ def test_unyank_project_release(self, monkeypatch): ] assert release.project.record_event.calls == [ pretend.call( - tag="project:release:unyank", + tag=EventTag.Project.ReleaseUnyank, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -7352,7 +7337,7 @@ def test_delete_project_release(self, monkeypatch): ] assert release.project.record_event.calls == [ pretend.call( - tag="project:release:remove", + tag=EventTag.Project.ReleaseRemove, ip_address=request.remote_addr, additional={ "submitted_by": request.user.username, @@ -9453,7 +9438,7 @@ def test_add_github_oidc_provider_preexisting(self, monkeypatch): ] assert project.record_event.calls == [ pretend.call( - tag="project:oidc:provider-added", + tag=EventTag.Project.OIDCProviderAdded, ip_address=request.remote_addr, additional={ "provider": "GitHub", @@ -9540,7 +9525,7 @@ def test_add_github_oidc_provider_created(self, monkeypatch): ] assert project.record_event.calls == [ pretend.call( - tag="project:oidc:provider-added", + tag=EventTag.Project.OIDCProviderAdded, ip_address=request.remote_addr, additional={ "provider": "GitHub", @@ -9835,7 +9820,7 @@ def test_delete_oidc_provider(self, monkeypatch): assert project.record_event.calls == [ pretend.call( - tag="project:oidc:provider-removed", + tag=EventTag.Project.OIDCProviderRemoved, ip_address=request.remote_addr, additional={ "provider": "fakeprovider",
Catalog and enforce ProjectEvent and UserEvent tags

We do not currently have a canonical list of `tag` values for the [UserEvent](https://github.com/pypa/warehouse/blob/de6d014f62021cb915187f75458fa3ce53d115a7/warehouse/accounts/models.py#L170-L181) and [ProjectEvent](https://github.com/pypa/warehouse/blob/de6d014f62021cb915187f75458fa3ce53d115a7/warehouse/packaging/models.py#L239-L250) models. As more event-driven features are added to warehouse, standardizing these tag names across the codebase would keep the individual systems consistent. If we also enforce that the models accept only valid tags, reviewers no longer need to scrutinize every PR for an errant tag (a typo or an otherwise invalid value).

This was previously considered when reviewing a new event-driven system: https://github.com/pypa/warehouse/pull/7118#discussion_r359505560
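For illustration, here is a minimal, self-contained sketch of the approach the patch above takes. This is a toy under stated assumptions, not the exact warehouse API; `EventTagEnum`, `ProjectEventTag`, and `record_event` below are illustrative names. A `str`-based enum keeps every tag usable as a plain string (so existing call sites and stored values keep working), parses each value into its colon-separated source/subject/action parts, and makes any uncatalogued tag fail fast:

```python
import enum


class EventTagEnum(str, enum.Enum):
    """A tag that is both a plain string and a parsed source/subject/action triple."""

    source_type: str
    subject_type: str
    action: str

    def __new__(cls, value: str):
        # "project:role:add" -> source "project", subject "role", action "add".
        parts = value.split(":")
        obj = str.__new__(cls, value)
        obj._value_ = value
        obj.source_type = parts[0]
        # Two-part tags such as "project:create" have no explicit subject,
        # so fall back to the source type.
        obj.subject_type = ":".join(parts[1:-1]) or parts[0]
        obj.action = parts[-1]
        return obj


class ProjectEventTag(EventTagEnum):
    # Hypothetical catalog entries, for this example only.
    ProjectCreate = "project:create"
    RoleAdd = "project:role:add"


def record_event(tag: str) -> None:
    # Coercing through the enum enforces the catalog: an unknown or
    # misspelled tag raises ValueError here instead of being stored.
    tag = ProjectEventTag(tag)
    print(tag.source_type, tag.subject_type, tag.action)


record_event(ProjectEventTag.RoleAdd)  # -> project role add
record_event("project:create")         # -> project project create
```

Anything that records events can then require a catalogued tag, so a typo such as `"project:role:ad"` raises `ValueError` at the call site rather than silently landing in the events table.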
2022-10-12T17:26:46Z
[]
[]
pypi/warehouse
12,360
pypi__warehouse-12360
[ "11640" ]
8eeae1267e675ab89bb27a1dfda5f254fee7252e
diff --git a/warehouse/accounts/services.py b/warehouse/accounts/services.py --- a/warehouse/accounts/services.py +++ b/warehouse/accounts/services.py @@ -94,7 +94,11 @@ def _get_user(self, userid): # TODO: We probably don't actually want to just return the database # object here. # TODO: We need some sort of Anonymous User. - return self.db.query(User).options(joinedload(User.webauthn)).get(userid) + return ( + self.db.query(User).options(joinedload(User.webauthn)).get(userid) + if userid + else None + ) def get_user(self, userid): return self.cached_get_user(userid) diff --git a/warehouse/events/tags.py b/warehouse/events/tags.py --- a/warehouse/events/tags.py +++ b/warehouse/events/tags.py @@ -59,6 +59,8 @@ def __new__(cls, value: str): class EventTag: class Account(EventTagEnum): + """Tags for User events.""" + # Name = "source_type:subject_type:action" APITokenAdded = "account:api_token:added" APITokenRemoved = "account:api_token:removed" @@ -100,6 +102,11 @@ class Account(EventTagEnum): # RoleAccepted = "account:role:accepted" class Project(EventTagEnum): + """Tags for Project events. + + Keep in sync with: warehouse/templates/manage/project/history.html + """ + # Name = "source_type:subject_type:action" APITokenAdded = "project:api_token:added" APITokenRemoved = "project:api_token:removed" @@ -129,6 +136,11 @@ class Project(EventTagEnum): # RoleDelete = "project:role:delete" class Organization(EventTagEnum): + """Tags for Organization events. + + Keep in sync with: warehouse/templates/manage/organization/history.html + """ + # Name = "source_type:subject_type:action" CatalogEntryAdd = "organization:catalog_entry:add" OrganizationApprove = "organization:approve" @@ -154,6 +166,11 @@ class Organization(EventTagEnum): TeamRoleRemove = "organization:team_role:remove" class Team(EventTagEnum): + """Tags for Organization events. 
+ + Keep in sync with: warehouse/templates/manage/team/history.html + """ + # Name = "source_type:subject_type:action" TeamCreate = "team:create" TeamDelete = "team:delete" diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -1332,14 +1332,13 @@ def create_organization(self): organization = self.organization_service.add_organization(**data) self.organization_service.record_event( organization.id, - tag=EventTag.Organization.OrganizationCreate, - additional={"created_by_user_id": str(self.request.user.id)}, + tag=EventTag.Organization.CatalogEntryAdd, + additional={"submitted_by_user_id": str(self.request.user.id)}, ) - self.organization_service.add_catalog_entry(organization.id) self.organization_service.record_event( organization.id, - tag=EventTag.Organization.CatalogEntryAdd, - additional={"submitted_by_user_id": str(self.request.user.id)}, + tag=EventTag.Organization.OrganizationCreate, + additional={"created_by_user_id": str(self.request.user.id)}, ) self.organization_service.add_organization_role( organization.id, @@ -1501,6 +1500,11 @@ def save_organization_name(self): self.organization.id, form.name.data, ) + self.organization.record_event( + tag=EventTag.Organization.CatalogEntryAdd, + ip_address=self.request.remote_addr, + additional={"submitted_by_user_id": str(self.request.user.id)}, + ) self.organization.record_event( tag=EventTag.Organization.OrganizationRename, ip_address=self.request.remote_addr, @@ -1753,7 +1757,7 @@ def create_team(self): tag=EventTag.Organization.TeamCreate, ip_address=self.request.remote_addr, additional={ - "submitted_by_user_id": str(self.request.user.id), + "created_by_user_id": str(self.request.user.id), "team_name": team.name, }, ) @@ -1761,7 +1765,7 @@ def create_team(self): tag=EventTag.Team.TeamCreate, ip_address=self.request.remote_addr, additional={ - "submitted_by_user_id": str(self.request.user.id), + "created_by_user_id": str(self.request.user.id), }, ) @@ -2374,6 +2378,47 @@ def delete_organization_role(organization, request): ) +@view_config( + route_name="manage.organization.history", + context=Organization, + renderer="manage/organization/history.html", + uses_session=True, + permission="manage:organization", + has_translations=True, +) +def manage_organization_history(organization, request): + try: + page_num = int(request.params.get("page", 1)) + except ValueError: + raise HTTPBadRequest("'page' must be an integer.") + + events_query = ( + request.db.query(Organization.Event) + .join(Organization.Event.source) + .filter(Organization.Event.source_id == organization.id) + .order_by(Organization.Event.time.desc()) + .order_by(Organization.Event.tag.desc()) + ) + + events = SQLAlchemyORMPage( + events_query, + page=page_num, + items_per_page=25, + url_maker=paginate_url_factory(request), + ) + + if events.page_count and page_num > events.page_count: + raise HTTPNotFound + + user_service = request.find_service(IUserService, context=None) + + return { + "events": events, + "get_user": user_service.get_user, + "organization": organization, + } + + @view_defaults( route_name="manage.team.settings", context=Team, @@ -2428,18 +2473,17 @@ def save_team(self): tag=EventTag.Organization.TeamRename, ip_address=self.request.remote_addr, additional={ - "renamed_by_user_id": str(self.request.user.id), "team_name": self.team.name, "previous_team_name": previous_team_name, + "renamed_by_user_id": str(self.request.user.id), }, ) self.team.record_event( 
tag=EventTag.Team.TeamRename, ip_address=self.request.remote_addr, additional={ - "renamed_by_user_id": str(self.request.user.id), - "team_name": self.team.name, "previous_team_name": previous_team_name, + "renamed_by_user_id": str(self.request.user.id), }, ) self.request.session.flash("Team name updated", queue="success") @@ -2767,6 +2811,47 @@ def delete_team_role(self): ) +@view_config( + route_name="manage.team.history", + context=Team, + renderer="manage/team/history.html", + uses_session=True, + permission="manage:team", + has_translations=True, +) +def manage_team_history(team, request): + try: + page_num = int(request.params.get("page", 1)) + except ValueError: + raise HTTPBadRequest("'page' must be an integer.") + + events_query = ( + request.db.query(Team.Event) + .join(Team.Event.source) + .filter(Team.Event.source_id == team.id) + .order_by(Team.Event.time.desc()) + .order_by(Team.Event.tag.desc()) + ) + + events = SQLAlchemyORMPage( + events_query, + page=page_num, + items_per_page=25, + url_maker=paginate_url_factory(request), + ) + + if events.page_count and page_num > events.page_count: + raise HTTPNotFound + + user_service = request.find_service(IUserService, context=None) + + return { + "events": events, + "get_user": user_service.get_user, + "team": team, + } + + @view_config( route_name="manage.projects", renderer="manage/projects.html", @@ -4831,7 +4916,13 @@ def manage_project_history(project, request): if events.page_count and page_num > events.page_count: raise HTTPNotFound - return {"project": project, "events": events} + user_service = request.find_service(IUserService, context=None) + + return { + "events": events, + "get_user": user_service.get_user, + "project": project, + } @view_config( diff --git a/warehouse/organizations/models.py b/warehouse/organizations/models.py --- a/warehouse/organizations/models.py +++ b/warehouse/organizations/models.py @@ -608,11 +608,15 @@ class Team(HasEvents, db.Model): ) def record_event(self, *, tag, ip_address, additional={}): - """Record team name in events in case team is ever deleted.""" + """Record org and team name in events in case they are ever deleted.""" super().record_event( tag=tag, ip_address=ip_address, - additional={"team_name": self.name, **additional}, + additional={ + "organization_name": self.organization.name, + "team_name": self.name, + **additional, + }, ) def __acl__(self): diff --git a/warehouse/organizations/services.py b/warehouse/organizations/services.py --- a/warehouse/organizations/services.py +++ b/warehouse/organizations/services.py @@ -121,6 +121,8 @@ def add_organization(self, name, display_name, orgtype, link_url, description): self.db.add(organization) self.db.flush() + self.add_catalog_entry(organization.id) + return organization def add_catalog_entry(self, organization_id): diff --git a/warehouse/routes.py b/warehouse/routes.py --- a/warehouse/routes.py +++ b/warehouse/routes.py @@ -304,6 +304,13 @@ def includeme(config): traverse="/{organization_name}", domain=warehouse, ) + config.add_route( + "manage.organization.history", + "/manage/organization/{organization_name}/history/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ) config.add_route( "manage.team.settings", "/manage/organization/{organization_name}/team/{team_name}/settings/", @@ -332,6 +339,13 @@ def includeme(config): traverse="/{organization_name}/{team_name}", domain=warehouse, ) + config.add_route( + "manage.team.history", + 
"/manage/organization/{organization_name}/team/{team_name}/history/", + factory="warehouse.organizations.models:TeamFactory", + traverse="/{organization_name}/{team_name}", + domain=warehouse, + ) config.add_route("manage.projects", "/manage/projects/", domain=warehouse) config.add_route( "manage.project.settings",
diff --git a/tests/common/db/organizations.py b/tests/common/db/organizations.py --- a/tests/common/db/organizations.py +++ b/tests/common/db/organizations.py @@ -143,6 +143,13 @@ class Meta: organization = factory.SubFactory(OrganizationFactory) +class TeamEventFactory(WarehouseFactory): + class Meta: + model = Team.Event + + source = factory.SubFactory(TeamFactory) + + class TeamRoleFactory(WarehouseFactory): class Meta: model = TeamRole diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -49,11 +49,13 @@ from warehouse.oidc.interfaces import TooManyOIDCRegistrations from warehouse.organizations.interfaces import IOrganizationService from warehouse.organizations.models import ( + Organization, OrganizationInvitation, OrganizationInvitationStatus, OrganizationRole, OrganizationRoleType, OrganizationType, + Team, TeamProjectRole, TeamProjectRoleType, TeamRoleType, @@ -72,12 +74,14 @@ from ...common.db.accounts import EmailFactory from ...common.db.organizations import ( + OrganizationEventFactory, OrganizationFactory, OrganizationInvitationFactory, OrganizationProjectFactory, OrganizationRoleFactory, OrganizationStripeCustomerFactory, OrganizationStripeSubscriptionFactory, + TeamEventFactory, TeamFactory, TeamProjectRoleFactory, TeamRoleFactory, @@ -2545,9 +2549,6 @@ def test_create_organization(self, enable_organizations, monkeypatch): description=organization.description, ) ] - assert organization_service.add_catalog_entry.calls == [ - pretend.call(organization.id) - ] assert organization_service.add_organization_role.calls == [ pretend.call( organization.id, @@ -2558,13 +2559,13 @@ def test_create_organization(self, enable_organizations, monkeypatch): assert organization_service.record_event.calls == [ pretend.call( organization.id, - tag=EventTag.Organization.OrganizationCreate, - additional={"created_by_user_id": str(request.user.id)}, + tag=EventTag.Organization.CatalogEntryAdd, + additional={"submitted_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, - tag=EventTag.Organization.CatalogEntryAdd, - additional={"submitted_by_user_id": str(request.user.id)}, + tag=EventTag.Organization.OrganizationCreate, + additional={"created_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, @@ -2694,9 +2695,6 @@ def test_create_organization_with_subscription( description=organization.description, ) ] - assert organization_service.add_catalog_entry.calls == [ - pretend.call(organization.id) - ] assert organization_service.add_organization_role.calls == [ pretend.call( organization.id, @@ -2707,13 +2705,13 @@ def test_create_organization_with_subscription( assert organization_service.record_event.calls == [ pretend.call( organization.id, - tag=EventTag.Organization.OrganizationCreate, - additional={"created_by_user_id": str(request.user.id)}, + tag=EventTag.Organization.CatalogEntryAdd, + additional={"submitted_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, - tag=EventTag.Organization.CatalogEntryAdd, - additional={"submitted_by_user_id": str(request.user.id)}, + tag=EventTag.Organization.OrganizationCreate, + additional={"created_by_user_id": str(request.user.id)}, ), pretend.call( organization.id, @@ -4939,6 +4937,132 @@ def test_delete_non_owner_role(self, db_request, enable_organizations): assert result.headers["Location"] == "/the-redirect" +class TestManageOrganizationHistory: + def test_get(self, db_request, user_service): + 
organization = OrganizationFactory.create() + older_event = OrganizationEventFactory.create( + source=organization, + tag="fake:event", + ip_address="0.0.0.0", + time=datetime.datetime(2017, 2, 5, 17, 18, 18, 462_634), + ) + newer_event = OrganizationEventFactory.create( + source=organization, + tag="fake:event", + ip_address="0.0.0.0", + time=datetime.datetime(2018, 2, 5, 17, 18, 18, 462_634), + ) + + assert views.manage_organization_history(organization, db_request) == { + "events": [newer_event, older_event], + "get_user": user_service.get_user, + "organization": organization, + } + + def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request): + params = MultiDict({"page": "abc"}) + db_request.params = params + + events_query = pretend.stub() + db_request.events_query = pretend.stub( + events_query=lambda *a, **kw: events_query + ) + + page_obj = pretend.stub(page_count=10, item_count=1000) + page_cls = pretend.call_recorder(lambda *a, **kw: page_obj) + monkeypatch.setattr(views, "SQLAlchemyORMPage", page_cls) + + url_maker = pretend.stub() + url_maker_factory = pretend.call_recorder(lambda request: url_maker) + monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory) + + organization = OrganizationFactory.create() + with pytest.raises(HTTPBadRequest): + views.manage_organization_history(organization, db_request) + + assert page_cls.calls == [] + + def test_first_page(self, db_request, user_service): + page_number = 1 + params = MultiDict({"page": page_number}) + db_request.params = params + + organization = OrganizationFactory.create() + items_per_page = 25 + total_items = items_per_page + 2 + for _ in range(total_items): + OrganizationEventFactory.create( + source=organization, tag="fake:event", ip_address="0.0.0.0" + ) + events_query = ( + db_request.db.query(Organization.Event) + .join(Organization.Event.source) + .filter(Organization.Event.source_id == organization.id) + .order_by(Organization.Event.time.desc()) + ) + + events_page = SQLAlchemyORMPage( + events_query, + page=page_number, + items_per_page=items_per_page, + item_count=total_items, + url_maker=paginate_url_factory(db_request), + ) + assert views.manage_organization_history(organization, db_request) == { + "events": events_page, + "get_user": user_service.get_user, + "organization": organization, + } + + def test_last_page(self, db_request, user_service): + page_number = 2 + params = MultiDict({"page": page_number}) + db_request.params = params + + organization = OrganizationFactory.create() + items_per_page = 25 + total_items = items_per_page + 2 + for _ in range(total_items): + OrganizationEventFactory.create( + source=organization, tag="fake:event", ip_address="0.0.0.0" + ) + events_query = ( + db_request.db.query(Organization.Event) + .join(Organization.Event.source) + .filter(Organization.Event.source_id == organization.id) + .order_by(Organization.Event.time.desc()) + ) + + events_page = SQLAlchemyORMPage( + events_query, + page=page_number, + items_per_page=items_per_page, + item_count=total_items, + url_maker=paginate_url_factory(db_request), + ) + assert views.manage_organization_history(organization, db_request) == { + "events": events_page, + "get_user": user_service.get_user, + "organization": organization, + } + + def test_raises_404_with_out_of_range_page(self, db_request): + page_number = 3 + params = MultiDict({"page": page_number}) + db_request.params = params + + organization = OrganizationFactory.create() + items_per_page = 25 + total_items = items_per_page + 2 + for _ in 
range(total_items): + OrganizationEventFactory.create( + source=organization, tag="fake:event", ip_address="0.0.0.0" + ) + + with pytest.raises(HTTPNotFound): + assert views.manage_organization_history(organization, db_request) + + class TestManageTeamSettings: def test_manage_team( self, db_request, organization_service, user_service, enable_organizations @@ -5499,6 +5623,126 @@ def test_delete_team_role_not_a_manager( assert isinstance(result, HTTPSeeOther) +class TestManageTeamHistory: + def test_get(self, db_request, user_service): + team = TeamFactory.create() + older_event = TeamEventFactory.create( + source=team, + tag="fake:event", + ip_address="0.0.0.0", + time=datetime.datetime(2017, 2, 5, 17, 18, 18, 462_634), + ) + newer_event = TeamEventFactory.create( + source=team, + tag="fake:event", + ip_address="0.0.0.0", + time=datetime.datetime(2018, 2, 5, 17, 18, 18, 462_634), + ) + + assert views.manage_team_history(team, db_request) == { + "events": [newer_event, older_event], + "get_user": user_service.get_user, + "team": team, + } + + def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request): + params = MultiDict({"page": "abc"}) + db_request.params = params + + events_query = pretend.stub() + db_request.events_query = pretend.stub( + events_query=lambda *a, **kw: events_query + ) + + page_obj = pretend.stub(page_count=10, item_count=1000) + page_cls = pretend.call_recorder(lambda *a, **kw: page_obj) + monkeypatch.setattr(views, "SQLAlchemyORMPage", page_cls) + + url_maker = pretend.stub() + url_maker_factory = pretend.call_recorder(lambda request: url_maker) + monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory) + + team = TeamFactory.create() + with pytest.raises(HTTPBadRequest): + views.manage_team_history(team, db_request) + + assert page_cls.calls == [] + + def test_first_page(self, db_request, user_service): + page_number = 1 + params = MultiDict({"page": page_number}) + db_request.params = params + + team = TeamFactory.create() + items_per_page = 25 + total_items = items_per_page + 2 + for _ in range(total_items): + TeamEventFactory.create(source=team, tag="fake:event", ip_address="0.0.0.0") + events_query = ( + db_request.db.query(Team.Event) + .join(Team.Event.source) + .filter(Team.Event.source_id == team.id) + .order_by(Team.Event.time.desc()) + ) + + events_page = SQLAlchemyORMPage( + events_query, + page=page_number, + items_per_page=items_per_page, + item_count=total_items, + url_maker=paginate_url_factory(db_request), + ) + assert views.manage_team_history(team, db_request) == { + "events": events_page, + "get_user": user_service.get_user, + "team": team, + } + + def test_last_page(self, db_request, user_service): + page_number = 2 + params = MultiDict({"page": page_number}) + db_request.params = params + + team = TeamFactory.create() + items_per_page = 25 + total_items = items_per_page + 2 + for _ in range(total_items): + TeamEventFactory.create(source=team, tag="fake:event", ip_address="0.0.0.0") + events_query = ( + db_request.db.query(Team.Event) + .join(Team.Event.source) + .filter(Team.Event.source_id == team.id) + .order_by(Team.Event.time.desc()) + ) + + events_page = SQLAlchemyORMPage( + events_query, + page=page_number, + items_per_page=items_per_page, + item_count=total_items, + url_maker=paginate_url_factory(db_request), + ) + assert views.manage_team_history(team, db_request) == { + "events": events_page, + "get_user": user_service.get_user, + "team": team, + } + + def test_raises_404_with_out_of_range_page(self, 
db_request): + page_number = 3 + params = MultiDict({"page": page_number}) + db_request.params = params + + team = TeamFactory.create() + items_per_page = 25 + total_items = items_per_page + 2 + for _ in range(total_items): + TeamEventFactory.create(source=team, tag="fake:event", ip_address="0.0.0.0") + + with pytest.raises(HTTPNotFound): + assert views.manage_team_history(team, db_request) + + class TestManageProjects: def test_manage_projects(self, db_request): older_release = ReleaseFactory(created=datetime.datetime(2015, 1, 1)) @@ -9099,7 +9343,7 @@ def test_delete_own_owner_role( class TestManageProjectHistory: - def test_get(self, db_request): + def test_get(self, db_request, user_service): project = ProjectFactory.create() older_event = ProjectEventFactory.create( source=project, @@ -9115,8 +9359,9 @@ def test_get(self, db_request): ) assert views.manage_project_history(project, db_request) == { - "project": project, "events": [newer_event, older_event], + "get_user": user_service.get_user, + "project": project, } def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request): @@ -9142,7 +9387,7 @@ def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request): assert page_cls.calls == [] - def test_first_page(self, db_request): + def test_first_page(self, db_request, user_service): page_number = 1 params = MultiDict({"page": page_number}) db_request.params = params @@ -9169,11 +9414,12 @@ def test_first_page(self, db_request): url_maker=paginate_url_factory(db_request), ) assert views.manage_project_history(project, db_request) == { - "project": project, "events": events_page, + "get_user": user_service.get_user, + "project": project, } - def test_last_page(self, db_request): + def test_last_page(self, db_request, user_service): page_number = 2 params = MultiDict({"page": page_number}) db_request.params = params @@ -9200,8 +9446,9 @@ def test_last_page(self, db_request): url_maker=paginate_url_factory(db_request), ) assert views.manage_project_history(project, db_request) == { - "project": project, "events": events_page, + "get_user": user_service.get_user, + "project": project, } def test_raises_404_with_out_of_range_page(self, db_request): diff --git a/tests/unit/test_routes.py b/tests/unit/test_routes.py --- a/tests/unit/test_routes.py +++ b/tests/unit/test_routes.py @@ -318,6 +318,13 @@ def add_policy(name, filename): traverse="/{organization_name}", domain=warehouse, ), + pretend.call( + "manage.organization.history", + "/manage/organization/{organization_name}/history/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ), pretend.call( "manage.team.settings", "/manage/organization/{organization_name}/team/{team_name}/settings/", @@ -346,6 +353,13 @@ def add_policy(name, filename): traverse="/{organization_name}/{team_name}", domain=warehouse, ), + pretend.call( + "manage.team.history", + "/manage/organization/{organization_name}/team/{team_name}/history/", + factory="warehouse.organizations.models:TeamFactory", + traverse="/{organization_name}/{team_name}", + domain=warehouse, + ), pretend.call("manage.projects", "/manage/projects/", domain=warehouse), pretend.call( "manage.project.settings",
Expose audit log of operations performed in an organization

An owner of an organization account should be able to view an audit of operations such as:

1. All users invited to the organization
2. Users who accepted the invitation
3. Users who declined the invitation
4. If any invitation was revoked

This audit log should be exposed to the owners of the org.
2022-10-14T19:47:44Z
[]
[]
pypi/warehouse
12,364
pypi__warehouse-12364
[ "11650" ]
a126979d243747441af95d8c2c3d6edd9e61f305
diff --git a/warehouse/accounts/views.py b/warehouse/accounts/views.py --- a/warehouse/accounts/views.py +++ b/warehouse/accounts/views.py @@ -881,6 +881,7 @@ def _error(message): elif request.method == "POST" and "decline" in request.POST: organization_service.delete_organization_invite(organization_invite.id) submitter_user = user_service.get_user(data.get("submitter_id")) + message = request.params.get("message", "") organization.record_event( tag="organization:organization_role:declined", ip_address=request.remote_addr, @@ -911,6 +912,7 @@ def _error(message): owner_users, user=user, organization_name=organization.name, + message=message, ) send_declined_as_invited_organization_member_email( request, diff --git a/warehouse/email/__init__.py b/warehouse/email/__init__.py --- a/warehouse/email/__init__.py +++ b/warehouse/email/__init__.py @@ -448,10 +448,12 @@ def send_organization_member_invite_declined_email( *, user, organization_name, + message, ): return { "username": user.username, "organization_name": organization_name, + "message": message, }
diff --git a/tests/unit/accounts/test_views.py b/tests/unit/accounts/test_views.py --- a/tests/unit/accounts/test_views.py +++ b/tests/unit/accounts/test_views.py @@ -2363,10 +2363,13 @@ def test_verify_organization_role_declined( user=owner_user, role_name=OrganizationRoleType.Owner, ) + message = "Some reason to decline." db_request.user = user db_request.method = "POST" - db_request.POST.update({"token": "RANDOM_KEY", "decline": "Decline"}) + db_request.POST.update( + {"token": "RANDOM_KEY", "decline": "Decline", "message": message} + ) db_request.route_path = pretend.call_recorder(lambda name: "/") db_request.remote_addr = "192.168.1.1" db_request.session.flash = pretend.call_recorder(lambda *a, **kw: None) @@ -2411,6 +2414,7 @@ def test_verify_organization_role_declined( {owner_user}, user=user, organization_name=organization.name, + message=message, ) ] assert declined_as_invited_organization_member_email.calls == [ diff --git a/tests/unit/email/test_init.py b/tests/unit/email/test_init.py --- a/tests/unit/email/test_init.py +++ b/tests/unit/email/test_init.py @@ -1918,6 +1918,7 @@ def organization_invite(self, pyramid_user): EmailFactory.create(user=self.user, verified=True) self.desired_role = "Manager" self.organization_name = "example" + self.message = "test message" self.email_token = "token" self.token_age = 72 * 60 * 60 @@ -2159,11 +2160,13 @@ def test_send_organization_member_invite_declined_email( self.initiator_user, user=self.user, organization_name=self.organization_name, + message=self.message, ) assert result == { "username": self.user.username, "organization_name": self.organization_name, + "message": self.message, } subject_renderer.assert_(**result) body_renderer.assert_(**result)
Confirmation message when declining an invitation

When a user has been invited to join an organization, the user can decline the invitation, but there is no confirmation message asking 'Are you sure?'. When declining an invitation, the user should also be able to provide a message that is then shared with the Owner in the declination email.

To replicate this issue:

1. Log in
2. Click on 'Your organizations'
3. Under 'Pending invitations', click on 'Decline'

A banner appears that the invitation has been declined, but there is no confirmation message.

![org8](https://user-images.githubusercontent.com/37237726/175117523-a14217d1-8723-4bcc-9f18-05f55c3f7bea.png)
Priority 3/3
2022-10-15T13:58:09Z
[]
[]
pypi/warehouse
12,377
pypi__warehouse-12377
[ "12043" ]
1df08e9e77ebf0f021be01015c33975fedeb8083
diff --git a/warehouse/email/__init__.py b/warehouse/email/__init__.py --- a/warehouse/email/__init__.py +++ b/warehouse/email/__init__.py @@ -571,6 +571,34 @@ def send_role_changed_as_organization_member_email( } +@_email("organization-updated") +def send_organization_updated_email( + request, + user, + *, + organization_name, + organization_display_name, + organization_link_url, + organization_description, + organization_orgtype, + previous_organization_display_name, + previous_organization_link_url, + previous_organization_description, + previous_organization_orgtype, +): + return { + "organization_name": organization_name, + "organization_display_name": organization_display_name, + "organization_link_url": organization_link_url, + "organization_description": organization_description, + "organization_orgtype": organization_orgtype, + "previous_organization_display_name": previous_organization_display_name, + "previous_organization_link_url": previous_organization_link_url, + "previous_organization_description": previous_organization_description, + "previous_organization_orgtype": previous_organization_orgtype, + } + + @_email("organization-renamed") def send_organization_renamed_email( request, user, *, organization_name, previous_organization_name diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py --- a/warehouse/manage/forms.py +++ b/warehouse/manage/forms.py @@ -590,7 +590,6 @@ class SaveOrganizationForm(forms.Form): ] ) orgtype = wtforms.SelectField( - # TODO: Map additional choices to "Company" and "Community". choices=[("Company", "Company"), ("Community", "Community")], coerce=OrganizationType, validators=[ diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -69,6 +69,7 @@ send_organization_project_removed_email, send_organization_renamed_email, send_organization_role_verification_email, + send_organization_updated_email, send_password_change_email, send_primary_email_change_email, send_project_role_verification_email, @@ -1426,7 +1427,6 @@ def default_response(self): link_url=self.organization.link_url, description=self.organization.description, orgtype=self.organization.orgtype, - organization_service=self.organization_service, ), "save_organization_name_form": SaveOrganizationNameForm( organization_service=self.organization_service, @@ -1440,15 +1440,37 @@ def manage_organization(self): @view_config(request_method="POST", request_param=SaveOrganizationForm.__params__) def save_organization(self): - form = SaveOrganizationForm( - self.request.POST, - organization_service=self.organization_service, - ) + form = SaveOrganizationForm(self.request.POST) if form.validate(): + previous_organization_display_name = self.organization.display_name + previous_organization_link_url = self.organization.link_url + previous_organization_description = self.organization.description + previous_organization_orgtype = self.organization.orgtype + data = form.data + if previous_organization_orgtype == OrganizationType.Company: + # Disable changing Company account to Community account. 
+ data["orgtype"] = previous_organization_orgtype self.organization_service.update_organization(self.organization.id, **data) + + owner_users = set(organization_owners(self.request, self.organization)) + send_organization_updated_email( + self.request, + owner_users, + organization_name=self.organization.name, + organization_display_name=self.organization.display_name, + organization_link_url=self.organization.link_url, + organization_description=self.organization.description, + organization_orgtype=self.organization.orgtype, + previous_organization_display_name=previous_organization_display_name, + previous_organization_link_url=previous_organization_link_url, + previous_organization_description=previous_organization_description, + previous_organization_orgtype=previous_organization_orgtype, + ) + self.request.session.flash("Organization details updated", queue="success") + return HTTPSeeOther(self.request.path) return {**self.default_response, "save_organization_form": form} diff --git a/warehouse/predicates.py b/warehouse/predicates.py --- a/warehouse/predicates.py +++ b/warehouse/predicates.py @@ -14,6 +14,7 @@ from pyramid import predicates from pyramid.exceptions import ConfigurationError +from pyramid.httpexceptions import HTTPSeeOther from pyramid.util import is_same_domain from warehouse.admin.flags import AdminFlagValue @@ -82,17 +83,23 @@ def __call__(self, context: Organization | Team, request): context if isinstance(context, Organization) else context.organization ) - return ( - # Organization accounts are enabled. - not request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS) + if ( + # Organization accounts are disabled. + request.flags.enabled(AdminFlagValue.DISABLE_ORGANIZATIONS) + ): + return False + elif ( # Organization is active. - and organization.is_active + organization.is_active # Organization has active subscription if it is a Company. and ( organization.orgtype != OrganizationType.Company or organization.active_subscription ) - ) + ): + return True + else: + raise HTTPSeeOther(request.route_path("manage.organizations")) def includeme(config):
diff --git a/tests/unit/email/test_init.py b/tests/unit/email/test_init.py --- a/tests/unit/email/test_init.py +++ b/tests/unit/email/test_init.py @@ -2579,6 +2579,90 @@ def test_send_role_changed_as_organization_email( ] +class TestOrganizationUpdateEmails: + @pytest.fixture + def organization_update(self, pyramid_user): + self.user = UserFactory.create() + EmailFactory.create(user=self.user, verified=True) + self.organization_name = "example" + self.organization_display_name = "Example" + self.organization_link_url = "https://www.example.com/" + self.organization_description = "An example organization for testing" + self.organization_orgtype = "Company" + self.previous_organization_display_name = "Example Group" + self.previous_organization_link_url = "https://www.example.com/group/" + self.previous_organization_description = "An example group for testing" + self.previous_organization_orgtype = "Community" + + def test_send_organization_renamed_email( + self, + db_request, + organization_update, + make_email_renderers, + send_email, + ): + subject_renderer, body_renderer, html_renderer = make_email_renderers( + "organization-updated" + ) + + result = email.send_organization_updated_email( + db_request, + self.user, + organization_name=self.organization_name, + organization_display_name=self.organization_display_name, + organization_link_url=self.organization_link_url, + organization_description=self.organization_description, + organization_orgtype=self.organization_orgtype, + previous_organization_display_name=self.previous_organization_display_name, + previous_organization_link_url=self.previous_organization_link_url, + previous_organization_description=self.previous_organization_description, + previous_organization_orgtype=self.previous_organization_orgtype, + ) + + assert result == { + "organization_name": self.organization_name, + "organization_display_name": self.organization_display_name, + "organization_link_url": self.organization_link_url, + "organization_description": self.organization_description, + "organization_orgtype": self.organization_orgtype, + "previous_organization_display_name": ( + self.previous_organization_display_name + ), + "previous_organization_link_url": self.previous_organization_link_url, + "previous_organization_description": self.previous_organization_description, + "previous_organization_orgtype": self.previous_organization_orgtype, + } + subject_renderer.assert_(**result) + body_renderer.assert_(**result) + html_renderer.assert_(**result) + assert db_request.task.calls == [pretend.call(send_email)] + assert send_email.delay.calls == [ + pretend.call( + f"{self.user.name} <{self.user.email}>", + { + "subject": subject_renderer.string_response, + "body_text": body_renderer.string_response, + "body_html": ( + f"<html>\n" + f"<head></head>\n" + f"<body><p>{html_renderer.string_response}</p></body>\n" + f"</html>\n" + ), + }, + { + "tag": "account:email:sent", + "user_id": self.user.id, + "additional": { + "from_": db_request.registry.settings["mail.sender"], + "to": self.user.email, + "subject": subject_renderer.string_response, + "redact_ip": True, + }, + }, + ) + ] + + class TestOrganizationRenameEmails: @pytest.fixture def organization_rename(self, pyramid_user): diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -2870,14 +2870,20 @@ def test_manage_organization( link_url=organization.link_url, description=organization.description, 
orgtype=organization.orgtype, - organization_service=organization_service, ), ] + @pytest.mark.parametrize("orgtype", list(OrganizationType)) def test_save_organization( - self, db_request, organization_service, enable_organizations, monkeypatch + self, + db_request, + pyramid_user, + orgtype, + organization_service, + enable_organizations, + monkeypatch, ): - organization = OrganizationFactory.create() + organization = OrganizationFactory.create(orgtype=orgtype) db_request.POST = { "display_name": organization.display_name, "link_url": organization.link_url, @@ -2899,6 +2905,12 @@ def test_save_organization( ) monkeypatch.setattr(views, "SaveOrganizationForm", save_organization_cls) + send_email = pretend.call_recorder(lambda *a, **kw: None) + monkeypatch.setattr(views, "send_organization_updated_email", send_email) + monkeypatch.setattr( + views, "organization_owners", lambda *a, **kw: [pyramid_user] + ) + view = views.ManageOrganizationSettingsViews(organization, db_request) result = view.save_organization() @@ -2906,6 +2918,21 @@ def test_save_organization( assert organization_service.update_organization.calls == [ pretend.call(organization.id, **db_request.POST) ] + assert send_email.calls == [ + pretend.call( + db_request, + {pyramid_user}, + organization_name=organization.name, + organization_display_name=organization.display_name, + organization_link_url=organization.link_url, + organization_description=organization.description, + organization_orgtype=organization.orgtype, + previous_organization_display_name=organization.display_name, + previous_organization_link_url=organization.link_url, + previous_organization_description=organization.description, + previous_organization_orgtype=organization.orgtype, + ), + ] def test_save_organization_validation_fails( self, db_request, organization_service, enable_organizations, monkeypatch diff --git a/tests/unit/test_predicates.py b/tests/unit/test_predicates.py --- a/tests/unit/test_predicates.py +++ b/tests/unit/test_predicates.py @@ -14,6 +14,7 @@ import pytest from pyramid.exceptions import ConfigurationError +from pyramid.httpexceptions import HTTPSeeOther from warehouse.organizations.models import OrganizationType from warehouse.predicates import ( @@ -151,9 +152,16 @@ def test_inactive_organization( organization, enable_organizations, ): + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "/manage/organizations/" + ) + organization.is_active = False predicate = ActiveOrganizationPredicate(True, None) - assert not predicate(organization, db_request) + with pytest.raises(HTTPSeeOther): + predicate(organization, db_request) + + assert db_request.route_path.calls == [pretend.call("manage.organizations")] def test_inactive_subscription( self, @@ -162,8 +170,15 @@ def test_inactive_subscription( enable_organizations, inactive_subscription, ): + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "/manage/organizations/" + ) + predicate = ActiveOrganizationPredicate(True, None) - assert not predicate(organization, db_request) + with pytest.raises(HTTPSeeOther): + predicate(organization, db_request) + + assert db_request.route_path.calls == [pretend.call("manage.organizations")] def test_active_subscription( self, db_request, organization, enable_organizations, active_subscription
No email after changing the org name

When changing the org name, no email is sent to the owners that the org name has changed.

To recreate this issue:

- Click on 'Your organizations'
- Select an organization
- Click on 'Settings'
- Change the 'Organization name' and click on 'Update organization'

![test4](https://user-images.githubusercontent.com/37237726/183522853-74b452f6-3176-400d-9867-bf8d9a25e459.png)
2022-10-17T22:30:26Z
[]
[]
pypi/warehouse
12,398
pypi__warehouse-12398
[ "12397" ]
f52c6b85c72bf315cba7f1ea2a3fd57cd6acc877
diff --git a/warehouse/accounts/forms.py b/warehouse/accounts/forms.py --- a/warehouse/accounts/forms.py +++ b/warehouse/accounts/forms.py @@ -30,6 +30,7 @@ TooManyFailedLogins, ) from warehouse.accounts.models import DisableReason +from warehouse.accounts.services import RECOVERY_CODE_BYTES from warehouse.email import ( send_password_compromised_email_hibp, send_recovery_code_used_email, @@ -79,7 +80,16 @@ class WebAuthnCredentialMixin: class RecoveryCodeValueMixin: recovery_code_value = wtforms.StringField( - validators=[wtforms.validators.DataRequired()] + validators=[ + wtforms.validators.DataRequired(), + wtforms.validators.Regexp( + rf"^ *([0-9a-f] *){{{2*RECOVERY_CODE_BYTES}}}$", + message=_( + "Recovery Codes must be ${recovery_code_length} characters.", + mapping={"recovery_code_length": 2 * RECOVERY_CODE_BYTES}, + ), + ), + ] ) @@ -409,7 +419,7 @@ class RecoveryCodeAuthenticationForm( RecoveryCodeValueMixin, _TwoFactorAuthenticationForm ): def validate_recovery_code_value(self, field): - recovery_code_value = field.data.encode("utf-8") + recovery_code_value = field.data.encode("utf-8").strip() try: self.user_service.check_recovery_code(self.user_id, recovery_code_value) diff --git a/warehouse/accounts/services.py b/warehouse/accounts/services.py --- a/warehouse/accounts/services.py +++ b/warehouse/accounts/services.py @@ -60,6 +60,7 @@ PASSWORD_FIELD = "password" RECOVERY_CODE_COUNT = 8 +RECOVERY_CODE_BYTES = 8 @implementer(IUserService) @@ -574,7 +575,9 @@ def generate_recovery_codes(self, user_id): if user.has_recovery_codes: self.db.query(RecoveryCode).filter_by(user=user).delete() - recovery_codes = [secrets.token_hex(8) for _ in range(RECOVERY_CODE_COUNT)] + recovery_codes = [ + secrets.token_hex(RECOVERY_CODE_BYTES) for _ in range(RECOVERY_CODE_COUNT) + ] for recovery_code in recovery_codes: self.db.add(RecoveryCode(user=user, code=self.hasher.hash(recovery_code)))
diff --git a/tests/unit/accounts/test_forms.py b/tests/unit/accounts/test_forms.py --- a/tests/unit/accounts/test_forms.py +++ b/tests/unit/accounts/test_forms.py @@ -832,7 +832,7 @@ def test_invalid_recovery_code( ) form = forms.RecoveryCodeAuthenticationForm( request=request, - data={"recovery_code_value": "invalid"}, + data={"recovery_code_value": "deadbeef00001111"}, user_id=1, user_service=user_service, ) @@ -852,7 +852,7 @@ def test_valid_recovery_code(self, monkeypatch): user = pretend.stub(id=pretend.stub(), username="foobar") form = forms.RecoveryCodeAuthenticationForm( request=request, - data={"recovery_code_value": "valid"}, + data={"recovery_code_value": "deadbeef00001111"}, user_id=pretend.stub(), user_service=pretend.stub( check_recovery_code=pretend.call_recorder(lambda *a, **kw: True), @@ -868,3 +868,37 @@ def test_valid_recovery_code(self, monkeypatch): assert form.validate() assert send_recovery_code_used_email.calls == [pretend.call(request, user)] + + @pytest.mark.parametrize( + "input_string, validates", + [ + (" deadbeef00001111 ", True), + ("deadbeef00001111 ", True), + (" deadbeef00001111", True), + ("deadbeef00001111", True), + ("wu-tang", False), + ("deadbeef00001111 deadbeef11110000", False), + ], + ) + def test_recovery_code_string_validation( + self, monkeypatch, input_string, validates + ): + request = pretend.stub(remote_addr="127.0.0.1") + user = pretend.stub(id=pretend.stub(), username="foobar") + form = forms.RecoveryCodeAuthenticationForm( + request=request, + data={"recovery_code_value": input_string}, + user_id=pretend.stub(), + user_service=pretend.stub( + check_recovery_code=pretend.call_recorder(lambda *a, **kw: True), + get_user=lambda _: user, + ), + ) + send_recovery_code_used_email = pretend.call_recorder( + lambda request, user: None + ) + monkeypatch.setattr( + forms, "send_recovery_code_used_email", send_recovery_code_used_email + ) + + assert form.validate() == validates
Improvements to 'Login using recovery codes' input field

We've gotten a report that users are confused by the 'Login using recovery codes' input field, sometimes attempting to paste all their recovery codes into the field at once.

We should add some simple client-side and server-side verification of this field, to ensure that the contents are a) the length we expect for a recovery code, and b) a valid set of characters.

Additionally, we should ensure that the field does not autocomplete or retain previously entered data, as it currently does.
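The server-side half of the eventual fix (the form-validation diff above) is a single regex. Below is a minimal standalone sketch of how that pattern behaves, reusing the constant from `warehouse/accounts/services.py` and the inputs exercised in the accompanying tests; this script is illustrative, not Warehouse code:

```python
import re

# Mirror the validator added in warehouse/accounts/forms.py: with
# RECOVERY_CODE_BYTES = 8, a value must be exactly sixteen hex digits,
# tolerating stray spaces before, between, and after them.
RECOVERY_CODE_BYTES = 8
pattern = re.compile(rf"^ *([0-9a-f] *){{{2 * RECOVERY_CODE_BYTES}}}$")

assert pattern.match(" deadbeef00001111 ")                     # padded code: accepted
assert pattern.match("deadbeef00001111")                       # bare code: accepted
assert not pattern.match("wu-tang")                            # wrong characters: rejected
assert not pattern.match("deadbeef00001111 deadbeef11110000")  # two pasted codes: rejected
```

The `.strip()` added to `validate_recovery_code_value` then removes the tolerated outer whitespace before the code is checked against the stored hash.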
2022-10-20T18:25:59Z
[]
[]
pypi/warehouse
12,405
pypi__warehouse-12405
[ "6640" ]
e3b8fa7c33b8d07158ae291febfcb18f6662a38b
diff --git a/warehouse/routes.py b/warehouse/routes.py --- a/warehouse/routes.py +++ b/warehouse/routes.py @@ -57,6 +57,12 @@ def includeme(config): # Our legal policies config.add_policy("terms-of-use", "terms.md") + config.add_template_view( + "trademarks", + "/trademarks/", + "pages/trademarks.html", + view_kw={"has_translations": True}, + ) # HTML Snippets for including into other pages. config.add_route(
diff --git a/tests/unit/test_routes.py b/tests/unit/test_routes.py --- a/tests/unit/test_routes.py +++ b/tests/unit/test_routes.py @@ -588,6 +588,12 @@ def add_policy(name, filename): "pages/sponsors.html", view_kw={"has_translations": True}, ), + pretend.call( + "trademarks", + "/trademarks/", + "pages/trademarks.html", + view_kw={"has_translations": True}, + ), ] assert config.add_redirect.calls == [
Warehouse logo usage guidelines page with links to SVGs

**What's the problem this feature will solve?**
People will know how to properly use the Warehouse logos.

**Describe the solution you'd like**
A separate web page under pypi.org with a style guide and usage conditions for the Warehouse logo. Something like https://www.redhat.com/en/about/brand/standards/logo or https://www.python.org/community/logos/.

**Additional context**
* I want to have some graphics to use for https://github.com/pypa/gh-action-pypi-publish
* https://twitter.com/nlhkabu/status/1173467743112355840
* FWIW I've used it for the preview at this page https://github.com/topics/github-action, I hope that's fine for now
@ewdurbin I believe the PSF was in the process, or have already, placed a trademark on the PyPI logo. Could you please look into this and let me know the best next steps? Thanks @nlhkabu

@ewdurbin did this ever actually happen? I don't think I've seen anything published to date.

My understanding is that "PyPI" has been issued trademark but the logo itself has not been. @VanL do you know the status of the logo mark registration for PyPI?

The registration on the logo for PyPI has been granted. Building blocks alone: Reg. 5752245. Building blocks plus "Python Package Index": Reg. 5764496. Thanks, Van

Thanks Van! I guess the next steps here would be to determine what a usage policy would look like, who would moderate it, and have someone build such a page.

I would suggest that for these marks, there is no allowed use outside of what we do. Thanks, Van

Can we define who's "we"? The PSF? The PyPA? The PyPI/Warehouse project?

I suppose on further consideration, unlike all of the other PSF held trademarks...

- "Python" a widely used and adaptable programming language
- "PyCon" an idea / core name for an event
- "PyLadies" a chaptered organization

"PyPI" and associated logo represent a single entity/service, so usage outside of our presence on the web doesn't make sense in the same ways. For all intents and purposes "PyPI" is the service hosted at pypi.org, so I think "we" in this case is limited to that service and its associated web presence.

@ewdurbin it's currently present in a “social preview” (that thing displayed when you paste a link on Twitter or Facebook, and lists like https://github.com/topics/github-actions-python) of this page https://github.com/marketplace/actions/pypi-publish. It is technically quite coupled with the PyPI service, but is under PyPA currently. Should I rework it, then?

IMO, that's fine. What we're mostly concerned about is people using the logo for things that aren't "PyPI" -- e.g., some other index or private repo.

Right, that's what I wanted to clarify. What's fair use and what's absolutely forbidden.

Our messaging around the name and logo marks for PyPI should be that they always exclusively need to refer to the service hosted at pypi.org. Yes, nominative fair use exists, but we don't need to call it out (and even nominative fair use is fair use because it still refers to our single source for the PyPI service - pypi.org).

Our statement should be: "PyPI", the "Python Package Index", and the PyPI blocks logo are registered trademarks of the Python Software Foundation referring to the products and services associated with pypi.org. Any other use by any other party is prohibited. Thanks, Van

That makes sense to me!
2022-10-21T19:05:41Z
[]
[]
pypi/warehouse
12,408
pypi__warehouse-12408
[ "12253" ]
ef9747c737019cd3f55d81d3a316fbaf7865643b
diff --git a/warehouse/utils/readme.py b/warehouse/utils/readme.py --- a/warehouse/utils/readme.py +++ b/warehouse/utils/readme.py @@ -41,6 +41,10 @@ def render(value, content_type=None, use_fallback=True): # also ensure that it's had any disallowed markup removed. rendered = renderer.render(value, **parameters) + # Wrap plaintext as preformatted to preserve whitespace. + if content_type == "text/plain": + rendered = f"<pre>{rendered}</pre>" + # If the content was not rendered, we'll render as plaintext instead. The # reason it's necessary to do this instead of just accepting plaintext is # that readme_renderer will deal with sanitizing the content.
diff --git a/tests/unit/packaging/test_views.py b/tests/unit/packaging/test_views.py --- a/tests/unit/packaging/test_views.py +++ b/tests/unit/packaging/test_views.py @@ -176,6 +176,53 @@ def test_normalizing_version_redirects(self, db_request): pretend.call(name=release.project.name, version=release.version) ] + def test_detail_render_plain(self, db_request): + users = [UserFactory.create(), UserFactory.create(), UserFactory.create()] + project = ProjectFactory.create() + releases = [ + ReleaseFactory.create( + project=project, + version=v, + description=DescriptionFactory.create( + raw="plaintext description", + html="", + content_type="text/plain", + ), + ) + for v in ["1.0", "2.0", "3.0", "4.0.dev0"] + ] + files = [ + FileFactory.create( + release=r, + filename="{}-{}.tar.gz".format(project.name, r.version), + python_version="source", + packagetype="sdist", + ) + for r in releases + ] + + # Create a role for each user + for user in users: + RoleFactory.create(user=user, project=project) + + result = views.release_detail(releases[1], db_request) + + assert result == { + "project": project, + "release": releases[1], + "files": [files[1]], + "sdists": [files[1]], + "bdists": [], + "description": "<pre>plaintext description</pre>", + "latest_version": project.latest_version, + "all_versions": [ + (r.version, r.created, r.is_prerelease, r.yanked) + for r in reversed(releases) + ], + "maintainers": sorted(users, key=lambda u: u.username.lower()), + "license": None, + } + def test_detail_rendered(self, db_request): users = [UserFactory.create(), UserFactory.create(), UserFactory.create()] project = ProjectFactory.create() @@ -186,7 +233,7 @@ def test_detail_rendered(self, db_request): description=DescriptionFactory.create( raw="unrendered description", html="rendered description", - content_type="text/plain", + content_type="text/html", ), ) for v in ["1.0", "2.0", "3.0", "4.0.dev0"] @@ -231,7 +278,7 @@ def test_detail_renders(self, monkeypatch, db_request): project=project, version=v, description=DescriptionFactory.create( - raw="unrendered description", html="", content_type="text/plain" + raw="unrendered description", html="", content_type="text/html" ), ) for v in ["1.0", "2.0", "3.0", "4.0.dev0"] @@ -275,7 +322,7 @@ def test_detail_renders(self, monkeypatch, db_request): } assert render_description.calls == [ - pretend.call("unrendered description", "text/plain") + pretend.call("unrendered description", "text/html") ] def test_detail_renders_files_natural_sort(self, db_request): diff --git a/tests/unit/utils/test_readme.py b/tests/unit/utils/test_readme.py --- a/tests/unit/utils/test_readme.py +++ b/tests/unit/utils/test_readme.py @@ -30,7 +30,7 @@ def test_cant_render_rst(): def test_can_render_plaintext(): result = readme.render("raw thing", "text/plain") - assert result == "raw thing" + assert result == "<pre>raw thing</pre>" def test_can_render_markdown():
Monospaced font for text/plain long_description

Don't you think it would be nice to wrap text/plain project descriptions in a pre tag? Close if duplicate; I'm really sorry, but looking through over 400 issues of a production system is beyond my capabilities.
Hi, thanks for the issue. I think it makes sense, and I don't think we have an existing issue about this yet. I've added the 'good first issue' label here. Thanks!

This would keep ASCII art intact. Division of responsibility question: since the rendering task in warehouse will prefer the `html` column of a Release's Description, should `readme_renderer` return a `text/plain` object with `<pre>` tags wrapping the entire block [over here](https://github.com/pypa/readme_renderer/blob/34e27d3d658287d9a25e8117c3950e27aeda52a8/readme_renderer/txt.py#L22-L24), or should warehouse use Description's `content_type` and wrap during display, something like this?

```diff
diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py
index c9ea1bc2..3ea3b731 100644
--- a/warehouse/packaging/views.py
+++ b/warehouse/packaging/views.py
@@ -90,6 +90,9 @@ def release_detail(release, request):
     # already rendered content.
     if release.description.html:
         description = release.description.html
+        if release.description.content_type == "text/plain":
+            # Wrap as preformatted text to preserve whitespace.
+            description = f"<pre>{description}</pre>"
     else:
         description = readme.render(
             release.description.raw, release.description.content_type
```

My preference leans towards implementing it in `readme_renderer`, but that does change the current output behavior of `readme_renderer.txt.render()`.

I would expect `readme_renderer` to continue 'rendering' plaintext as plaintext with no HTML -- I think it's up to the consumer (Warehouse) to determine what to do with that rendered output and if it needs to be wrapped. So I think something like the diff you have here would be sufficient.

> continue 'rendering' plaintext as plaintext with no HTML

The only transformation we perform on plaintext today in `readme_renderer` [is replacing `\n` with HTML `<br>` tags](https://github.com/pypa/readme_renderer/blob/34e27d3d658287d9a25e8117c3950e27aeda52a8/readme_renderer/txt.py#L23), so we're not **not** transforming it already over there. :grin: Hence my preference for adding the `<pre>` prefixing over in the library vs warehouse, since we're already doing a little to make the content prepared for warehouse.

> I think it's up to the consumer (Warehouse) to determine what to do with that rendered output and if it needs to be wrapped.

That makes sense in the grand scheme of how things work today, since the rendered output gets inserted into the `html` column that we're reading from, vs `raw`.

If we want to encourage the notion of "plaintext gets non-transformed by the library and then transformed on warehouse", and still accomplish something like this cleanly, I've thought of a few things to make the overall behavior consistent:

### alternative 1

- change the library to not transform plaintext at all (a slightly breaking change to the `.txt` renderer)
- continue to store the output in the `html` column like it does today (which is only slightly wrong)
- apply the proposed diff to wrap the plaintext-from-html column with `<pre>`
- should automatically handle the `\n` newlines in the source content (adds a small cost at request time)

### alternative 2

- change the warehouse to look at the `raw` description column for plaintext content and transform to `<pre>` on display (slightly different from my proposed diff, adds a small cost at request time)
- only store `raw` for plaintext entries (would need to clear out older plaintext `html` column entries to make accurate)
- tell the warehouse to not run plaintext through the library (no change to the library, leaving "incorrect" txt render behavior alone; could mark it deprecated, but that might have impact on other users like `twine check`, would need to verify)

Both alternatives have tradeoffs and add a small amount of runtime cost, since instead of fetching fully-rendered HTML from a column, there's a bit of extra transformation needed to the raw. I'm leaning towards alternative 2 since it ends up the most "correct" overall - curious to know what you think!

### data

For fun, can we see how many descriptions are actually affected today? Something like:

```sql
select count(*) from release_descriptions where content_type = 'text/plain';
```

```
warehouse=> select count(*) from release_descriptions where content_type = 'text/plain';
 count
-------
  6328
(1 row)
```

Thanks @di! With that low amount of descriptions, I'm even more leaning towards alternative 2, since while it adds runtime cost, it's for so few items.

I'm not sure I totally follow the approach; in my mind we could just add a case statement to the Jinja template here that takes into account the content type.
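For reference, the change that was ultimately merged (the `warehouse/utils/readme.py` diff at the top of this entry) took a third path: `readme.render` itself wraps whatever `readme_renderer` produced for `text/plain` content. A small illustration of the resulting behavior, with the expected value taken from the accompanying unit test; treat it as a sketch that assumes a Warehouse development environment:

```python
from warehouse.utils import readme

# Plaintext descriptions now come back wrapped in <pre>, which preserves
# whitespace and ASCII art when the stored HTML is displayed.
assert readme.render("raw thing", "text/plain") == "<pre>raw thing</pre>"
```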
2022-10-23T21:54:21Z
[]
[]
pypi/warehouse
12,440
pypi__warehouse-12440
[ "12427" ]
685554af825ca71546dd0c0c6d84f19101dddaf9
diff --git a/warehouse/db.py b/warehouse/db.py --- a/warehouse/db.py +++ b/warehouse/db.py @@ -152,7 +152,7 @@ def cleanup(request): from warehouse.admin.flags import AdminFlag, AdminFlagValue flag = session.query(AdminFlag).get(AdminFlagValue.READ_ONLY.value) - if flag and flag.enabled and not request.user.is_superuser: + if flag and flag.enabled: request.tm.doom() # Return our session now that it's created and registered
diff --git a/tests/unit/test_db.py b/tests/unit/test_db.py --- a/tests/unit/test_db.py +++ b/tests/unit/test_db.py @@ -164,7 +164,11 @@ def test_create_session(monkeypatch, pyramid_services): (None, False, []), (pretend.stub(enabled=False), True, []), (pretend.stub(enabled=False), False, []), - (pretend.stub(enabled=True, description="flag description"), True, []), + ( + pretend.stub(enabled=True, description="flag description"), + True, + [pretend.call()], + ), ( pretend.stub(enabled=True, description="flag description"), False,
Doom transaction if `request.user` is None

Fixes #12422.
This still raises a `RecursionError: maximum recursion depth exceeded while calling a Python object` when logged in, and a `RuntimeError: Cannot use request.session in a view without uses_session=True.` when unauthenticated.

@ewdurbin Try this again; if it works I'll add the necessary tests.
2022-10-26T17:31:13Z
[]
[]
pypi/warehouse
12,653
pypi__warehouse-12653
[ "12392" ]
decc03c87bb5aae4f4edf39c62c4faca75208371
diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py --- a/warehouse/packaging/views.py +++ b/warehouse/packaging/views.py @@ -117,6 +117,10 @@ def release_detail(release, request): # first line only. short_license = release.license.split("\n")[0] if release.license else None + # Truncate the short license if we were unable to shorten it with newlines + if short_license and len(short_license) > 100 and short_license == release.license: + short_license = short_license[:100] + "..." + if license_classifiers and short_license: license = f"{license_classifiers} ({short_license})" else:
diff --git a/tests/unit/packaging/test_views.py b/tests/unit/packaging/test_views.py --- a/tests/unit/packaging/test_views.py +++ b/tests/unit/packaging/test_views.py @@ -400,6 +400,20 @@ def test_multiple_licenses_from_classifiers(self, db_request): assert result["license"] == "BSD License, MIT License" + def test_long_singleline_license(self, db_request): + """When license metadata contains no newlines, it gets truncated""" + release = ReleaseFactory.create( + license="Multiline License is very long, so long that it is far longer than" + " 100 characters, it's really so long, how terrible" + ) + + result = views.release_detail(release, db_request) + + assert result["license"] == ( + "Multiline License is very long, so long that it is far longer than 100 " + "characters, it's really so lo..." + ) + class TestEditProjectButton: def test_edit_project_button_returns_project(self):
PEP 621-style license field shows the entire license text

**Describe the bug**
When using PEP 621-style project metadata with a license file (see https://peps.python.org/pep-0621/#license), the entire license text is shown in the Meta -> License section of a package on PyPI. For an example see https://pypi.org/project/pytest-logikal/ and the relevant pyproject.toml file https://github.com/logikal-io/pytest-logikal/blob/main/pyproject.toml#L10. Showing the entire license text makes the Meta section almost useless by pushing the rest of the metadata quite far to the bottom.

**Expected behavior**
The license text should be hidden by default and perhaps shown with a modal upon clicking "see full text". Note that the issue is similar to https://github.com/pypi/warehouse/issues/1354; however, it is a little different because the PEP 621 standard causes this behavior. With more and more projects adopting PEP 621, I would expect this to become a bigger issue over time.
Some related discussion is here: https://github.com/FFY00/meson-python/issues/129

Thanks for the context! I think that the current UX needs to be improved regardless of how PEP 639 goes; the "Meta" section is really messed up for anyone who follows PEP 621 right now (which is the accepted standard at the moment). I think showing the (short) license name from the trove classifier there is more than enough (when available), with a fallback to the first line of the license text or so. PEP 639 would use different fields anyways, so even if that gets accepted you need to be able to deal with all the "legacy" approaches like PEP 621 or anything from before.

It seems to me that that area in the PyPI UI is not meant for displaying anything more than a short-ish line anyhow, so at the very least the excess text should be truncated/hidden.

I must add that currently setuptools just copies the contents of the referenced license file into the "License" field in PKG-INFO, so it can be interpreted that the contents of the field are the actual license text of the package. I can imagine that displaying the entire license on PyPI would be potentially useful, just not in the way it is done at the moment.

Lastly, I see what you mean by https://packaging.python.org/en/latest/specifications/core-metadata/#license, and so I believe setuptools does the right thing; we should probably just not use the license field together with the trove classifiers (in light of the core metadata specification). Nonetheless, even if someone used the license field according to the PyPA specification and added a longer custom license text there, the Meta section on PyPI would still be messed up, so I think this is indeed a valid bug, regardless of the slightly confusing specifications surrounding license information.

> Some related discussion is here: https://github.com/FFY00/meson-python/issues/129

The repo moved, here is the new link: https://github.com/mesonbuild/meson-python/issues/129
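The merged fix (the `warehouse/packaging/views.py` diff above) keeps the existing first-line heuristic and only falls back to character truncation when splitting on newlines changed nothing, i.e. when the license is one long single line. A condensed sketch of that logic, using the license text from the unit test:

```python
# Single-line license text taken from the unit test above; it contains no newlines.
license_text = (
    "Multiline License is very long, so long that it is far longer than"
    " 100 characters, it's really so long, how terrible"
)

short_license = license_text.split("\n")[0]  # no newlines, so this is unchanged

# Truncate only when the newline split was unable to shorten the text.
if short_license and len(short_license) > 100 and short_license == license_text:
    short_license = short_license[:100] + "..."

print(short_license)  # ends with "...it's really so lo..."
```

The equality check is the interesting design choice: a multi-line license is already shortened to its first line, so only licenses with no newlines at all fall through to the 100-character cut.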
2022-12-07T18:25:46Z
[]
[]
pypi/warehouse
12,699
pypi__warehouse-12699
[ "12419" ]
9116e796e905c3bc1a1a760fddb505fd198f26dd
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -4667,7 +4667,7 @@ def delete_project_role(project, request): request, role.user, submitter=request.user, project_name=project.name ) - request.session.flash("Removed role", queue="success") + request.session.flash("Removed collaborator", queue="success") if removing_self: return HTTPSeeOther(request.route_path("manage.projects")) except NoResultFound:
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -8860,7 +8860,7 @@ def test_delete_role(self, db_request, monkeypatch): pretend.call(db_request, user, submitter=user_2, project_name="foobar") ] assert db_request.session.flash.calls == [ - pretend.call("Removed role", queue="success") + pretend.call("Removed collaborator", queue="success") ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect" @@ -8956,7 +8956,7 @@ def test_delete_not_sole_owner_role(self, db_request, monkeypatch): pretend.call(db_request, user_2, submitter=user_2, project_name="foobar") ] assert db_request.session.flash.calls == [ - pretend.call("Removed role", queue="success") + pretend.call("Removed collaborator", queue="success") ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/the-redirect"
Banner shows the wrong message when removing a collaborator in an organization When removing a collaborator from a project in an organization, the banner says 'Removed role'. To recreate the error: - Click on Your organizations - Click on Manage for a specific organization - Click on Projects - Click on Manage for a specific project - Click on Collaborators - Click on Remove for a specific collaborator - Enter the team name or username of the collaborator - Click on Remove The banner should say 'Removed collaborator'. ![collaborator](https://user-images.githubusercontent.com/37237726/197766560-ca6d2200-185b-4905-90b5-e9b0cdaeb6e6.png)
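For context on the one-line fix in the patch above: Pyramid's session API queues short "flash" messages that the next rendered page displays as a banner. A minimal sketch of the pattern (the view body is illustrative; the real warehouse view does much more):

```python
# Sketch of the pattern the patch touches: queue a banner message on the
# "success" queue, then redirect; Pyramid renders queued messages on the
# next page the user sees.
from pyramid.httpexceptions import HTTPSeeOther


def delete_project_role(project, request):
    # ... role lookup and removal elided ...
    request.session.flash("Removed collaborator", queue="success")
    return HTTPSeeOther(request.route_path("manage.projects"))
```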
2022-12-19T17:40:18Z
[]
[]
pypi/warehouse
12,701
pypi__warehouse-12701
[ "8843" ]
116c5046ff2f45d87f9e68d58225eab6164cd5c0
diff --git a/warehouse/filters.py b/warehouse/filters.py --- a/warehouse/filters.py +++ b/warehouse/filters.py @@ -28,6 +28,7 @@ import packaging.version import pytz +from natsort import natsorted from pyramid.threadlocal import get_current_request from warehouse.utils.http import is_valid_uri @@ -137,6 +138,10 @@ def format_classifiers(classifiers): structured[key] = [] structured[key].append(value[0]) + # Sort all the values in our data structure + for key, value in structured.items(): + structured[key] = natsorted(value) + return structured
diff --git a/tests/unit/test_filters.py b/tests/unit/test_filters.py --- a/tests/unit/test_filters.py +++ b/tests/unit/test_filters.py @@ -146,6 +146,19 @@ def test_format_tags(inp, expected): ["Foo :: Bar :: Baz", "Vleep :: Foo", "Foo :: Bar :: Qux"], [("Foo", ["Bar :: Baz", "Bar :: Qux"]), ("Vleep", ["Foo"])], ), + ( + [ + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.8", + ], + [ + ( + "Programming Language", + ["Python :: 3.8", "Python :: 3.10", "Python :: 3.11"], + ) + ], + ), ], ) def test_format_classifiers(inp, expected):
Classifiers: Python version sort order **Describe the bug** The classifiers "Programming Language :: Python :: 3.X" aren't sorted in the right order on https://pypi.org or on https://test.pypi.org. I'm defining the classifiers like this in the `setup.py` file: ``` classifiers=[ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10" ] ``` In the navigation bar on pypi.org they will then appear like this: ![image](https://user-images.githubusercontent.com/70264417/99465712-42797f00-293b-11eb-8f1a-dced842f433f.png) With Python 3.10 at the top instead of at the bottom (after Python 3.9). To give visitors of pypi.org a better and faster overview of a project, it would be great if the Python classifiers were sorted by Python version. **Expected behavior** Classifiers sorted by Python versions. Python :: 3 Python :: 3.6 Python :: 3.7 Python :: 3.8 Python :: 3.9 Python :: 3.10 Python :: 3.11 Python :: 3.12 etc. **To Reproduce** It can be seen for example here: https://pypi.org/project/officeextractor/
Thanks for filing an issue. PyPI is technically sorting these "correctly", since the original classifier order is not preserved in the project metadata, classifiers are just strings, and the classifiers are being sorted lexicographically. This can be seen in a number of other places as well, like https://pypi.org/classifiers/. I agree that it does feel unintuitive for humans, though. I'd be open to accepting a change that takes any integer/float segment of a classifier into account when sorting. However, this should probably be computed once and pre-built into https://github.com/pypa/trove-classifiers instead of being done on the fly everywhere by PyPI. For instances like this, where we'd have to do it on the fly, `trove-classifiers` should provide a sorting function as well. I've filed https://github.com/pypa/trove-classifiers/issues/56 to capture that and marked this issue as blocked until it's implemented. This is also an issue in the list of filters in the sidebar of the search results page, e.g. https://pypi.org/search/?q=beautifulsoup ![Screenshot from 2021-01-19 23-43-05](https://user-images.githubusercontent.com/5687998/105128436-08e70080-5ab1-11eb-9f12-05cf9390b9f5.png) Also an issue in https://pypi.org/pypi?%3Aaction=list_classifiers I assume this has been fixed now by https://github.com/pypa/trove-classifiers/pull/57 👍🏼 Indeed! We still need to pull in the new version of `trove-classifiers` here and update our usage of it to use the `sorted_classifiers` list. Is this actually fixed? Looking at https://pypi.org/project/officeextractor/ the ordering is still wrong. Looks like we need to update this function, which does some secondary sorting that may no longer be necessary: https://github.com/pypa/warehouse/blob/7fc3ce5bd7ecc93ef54c1652787fb5e7757fe6f2/warehouse/filters.py#L126-L144 which is used here: https://github.com/pypa/warehouse/blob/20c947a4cd4a9667bf0f31722e77b87964ae195c/warehouse/templates/includes/packaging/project-data.html#L128 Re-opening... Please see PR https://github.com/pypa/warehouse/pull/10006. This is now fixed, from https://pypi.org/project/officeextractor/: ![image](https://user-images.githubusercontent.com/294415/132566140-ba662bbd-ee79-48ac-84c9-3017ba317ae8.png) There may still be some pages that have the incorrect ordering in cache, but these will slowly be updated as these pages fall out of cache. This has broken again; here's the previously fixed https://pypi.org/project/officeextractor/: <img width="203" alt="image" src="https://user-images.githubusercontent.com/1324225/184849356-865c5e11-c1db-4ed0-8670-11caebe55477.png"> I'm also seeing the "bad" order at https://test.pypi.org/project/sphinxcontrib-towncrier/0.3.0a1.dev46/: ``` Programming Language Python :: 3.10 Python :: 3.11 Python :: 3.6 Python :: 3.7 Python :: 3.8 Python :: 3.9 ```
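The merged fix (see this record's patch) runs each group of classifier values through `natsort.natsorted`, which compares numeric segments as numbers rather than characters. A quick standalone illustration of the difference:

```python
# Why lexicographic sorting misorders Python version classifiers, and how
# natural sorting (the approach used in warehouse/filters.py) corrects it.
from natsort import natsorted

values = ["Python :: 3.11", "Python :: 3.10", "Python :: 3.8"]

print(sorted(values))
# ['Python :: 3.10', 'Python :: 3.11', 'Python :: 3.8']   # '1' < '8' as text

print(natsorted(values))
# ['Python :: 3.8', 'Python :: 3.10', 'Python :: 3.11']   # 8 < 10 < 11 as numbers
```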
2022-12-19T21:26:28Z
[]
[]
pypi/warehouse
12,755
pypi__warehouse-12755
[ "12753" ]
fc5a2e4e6fafdfefef70b0b56f10274eb2ecb9b5
diff --git a/warehouse/packaging/models.py b/warehouse/packaging/models.py --- a/warehouse/packaging/models.py +++ b/warehouse/packaging/models.py @@ -591,6 +591,8 @@ def github_repo_info_url(self): segments = parsed.path.strip("/").split("/") if parsed.netloc in {"github.com", "www.github.com"} and len(segments) >= 2: user_name, repo_name = segments[:2] + if repo_name.endswith(".git"): + repo_name = repo_name.removesuffix(".git") return f"https://api.github.com/repos/{user_name}/{repo_name}" @property
diff --git a/tests/unit/packaging/test_models.py b/tests/unit/packaging/test_models.py --- a/tests/unit/packaging/test_models.py +++ b/tests/unit/packaging/test_models.py @@ -430,6 +430,14 @@ def test_acl(self, db_session): ("https://google.com/pypi/warehouse/tree/main", None), ("https://google.com", None), ("incorrect url", None), + ( + "https://www.github.com/pypi/warehouse.git", + "https://api.github.com/repos/pypi/warehouse", + ), + ( + "https://www.github.com/pypi/warehouse.git/", + "https://api.github.com/repos/pypi/warehouse", + ), ], ) def test_github_repo_info_url(self, db_session, home_page, expected):
GitHub repository URLs that contain a '.git' suffix in the repository name segment are not normalized **Describe the bug** Python packages may include a list of project URLs in their metadata, and when PyPI detects a GitHub-related project URL, it attempts to retrieve statistical information (number of stars, forks, and so on) from the repository by using the GitHub API. It is valid for a GitHub repository to specify a URL that contains a `.git` suffix in the repository name part -- for example, `https://github.com/pypi/warehouse.git/`. This is most commonly found in `git` `remote` references in cloned GitHub repositories, where the remote reference typically includes the `.git` repository name suffix (it's also part of the HTTPS clone URL that GitHub suggests when using their web UI). However, the GitHub API expects the repository name to be 'normalized' -- that is, repository type suffixes should not appear. So to follow the previous example, the repository name `warehouse` (part of organization name `pypi`) should be provided as the repository name when retrieving statistics for this repository. PyPI doesn't currently handle project URLs that include the `.git` suffix gracefully, and may display broken links and empty statistical fields for affected packages: ![image](https://user-images.githubusercontent.com/55152140/210579958-b58e7f73-0e9f-4cac-9575-3a1e8da7bc69.png) **Expected behavior** The `.git` suffix in GitHub repository names could be detected and removed so that statistics can be retrieved for packages that include it within a project URL. **To Reproduce** The problem is currently visible following the latest release of `arcana-core` at v0.8.6 here: - https://pypi.org/project/arcana-core/ And the repository URL configured for that release is visible here: - https://github.com/ArcanaFramework/arcana-core/blob/238aa5abc46d613b0c5f12fcade354ee0db9feba/pyproject.toml#L66 (note the `.git` suffix in the repository name) **My Platform** N/A **Additional context** Similar to #12725 (and may involve changes in a similar part of the code).
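The fix in this record's patch strips the suffix with `str.removesuffix` before building the API URL. Adapted into a standalone sketch (the real property in `warehouse/packaging/models.py` reads `self.home_page` instead of taking an argument):

```python
# Sketch of the normalization the patch performs: derive the GitHub API URL
# from a project's home page, dropping a trailing ".git" repo-name suffix.
from urllib.parse import urlparse


def github_repo_info_url(home_page: str) -> str | None:
    parsed = urlparse(home_page)
    segments = parsed.path.strip("/").split("/")
    if parsed.netloc in {"github.com", "www.github.com"} and len(segments) >= 2:
        user_name, repo_name = segments[:2]
        repo_name = repo_name.removesuffix(".git")
        return f"https://api.github.com/repos/{user_name}/{repo_name}"
    return None


print(github_repo_info_url("https://www.github.com/pypi/warehouse.git/"))
# -> https://api.github.com/repos/pypi/warehouse
```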
2023-01-04T17:06:18Z
[]
[]
pypi/warehouse
12,792
pypi__warehouse-12792
[ "10146" ]
0a39690e12b524609c407a6cd52b6f983268ae9a
diff --git a/warehouse/db.py b/warehouse/db.py --- a/warehouse/db.py +++ b/warehouse/db.py @@ -115,6 +115,11 @@ def _configure_alembic(config): alembic_cfg = alembic.config.Config() alembic_cfg.set_main_option("script_location", "warehouse:migrations") alembic_cfg.set_main_option("url", config.registry.settings["database.url"]) + alembic_cfg.set_section_option("post_write_hooks", "hooks", "black, isort") + alembic_cfg.set_section_option("post_write_hooks", "black.type", "console_scripts") + alembic_cfg.set_section_option("post_write_hooks", "black.entrypoint", "black") + alembic_cfg.set_section_option("post_write_hooks", "isort.type", "console_scripts") + alembic_cfg.set_section_option("post_write_hooks", "isort.entrypoint", "isort") return alembic_cfg
diff --git a/tests/unit/test_db.py b/tests/unit/test_db.py --- a/tests/unit/test_db.py +++ b/tests/unit/test_db.py @@ -82,7 +82,10 @@ def handler(config): def test_configure_alembic(monkeypatch): - config_obj = pretend.stub(set_main_option=pretend.call_recorder(lambda *a: None)) + config_obj = pretend.stub( + set_main_option=pretend.call_recorder(lambda *a: None), + set_section_option=pretend.call_recorder(lambda *a: None), + ) def config_cls(): return config_obj @@ -100,6 +103,13 @@ def config_cls(): pretend.call("script_location", "warehouse:migrations"), pretend.call("url", config.registry.settings["database.url"]), ] + assert alembic_config.set_section_option.calls == [ + pretend.call("post_write_hooks", "hooks", "black, isort"), + pretend.call("post_write_hooks", "black.type", "console_scripts"), + pretend.call("post_write_hooks", "black.entrypoint", "black"), + pretend.call("post_write_hooks", "isort.type", "console_scripts"), + pretend.call("post_write_hooks", "isort.entrypoint", "isort"), + ] def test_raises_db_available_error(pyramid_services, metrics):
Improve Alembic story Fixes #10053. Adds `alembic.ini`. Runs `black` and `isort` after generating migrations.
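A sketch of what this PR wires up: alembic's post-write hooks can be set programmatically on an `alembic.config.Config`, so every newly generated migration is reformatted by `black` and `isort`. The function below mirrors `_configure_alembic` from the diff above; the `database_url` parameter stands in for warehouse's settings lookup.

```python
# Configure alembic post-write hooks in code instead of alembic.ini, as the
# patch does: black and isort run over each freshly generated migration file.
import alembic.config


def configure_alembic(database_url: str) -> alembic.config.Config:
    cfg = alembic.config.Config()
    cfg.set_main_option("script_location", "warehouse:migrations")
    cfg.set_main_option("url", database_url)
    cfg.set_section_option("post_write_hooks", "hooks", "black, isort")
    cfg.set_section_option("post_write_hooks", "black.type", "console_scripts")
    cfg.set_section_option("post_write_hooks", "black.entrypoint", "black")
    cfg.set_section_option("post_write_hooks", "isort.type", "console_scripts")
    cfg.set_section_option("post_write_hooks", "isort.entrypoint", "isort")
    return cfg
```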
/spent 1h
2023-01-09T22:02:02Z
[]
[]
pypi/warehouse
12,820
pypi__warehouse-12820
[ "12801" ]
5b5e1421776327a0a111b1821d880983dae5abfb
diff --git a/warehouse/config.py b/warehouse/config.py --- a/warehouse/config.py +++ b/warehouse/config.py @@ -194,6 +194,7 @@ def configure(settings=None): maybe_set(settings, "camo.key", "CAMO_KEY") maybe_set(settings, "docs.url", "DOCS_URL") maybe_set(settings, "ga.tracking_id", "GA_TRACKING_ID") + maybe_set(settings, "ga4.tracking_id", "GA4_TRACKING_ID") maybe_set(settings, "statuspage.url", "STATUSPAGE_URL") maybe_set(settings, "token.password.secret", "TOKEN_PASSWORD_SECRET") maybe_set(settings, "token.email.secret", "TOKEN_EMAIL_SECRET") diff --git a/warehouse/csp.py b/warehouse/csp.py --- a/warehouse/csp.py +++ b/warehouse/csp.py @@ -86,6 +86,9 @@ def includeme(config): "connect-src": [ SELF, "https://api.github.com/repos/", + "https://*.google-analytics.com", + "https://*.analytics.google.com", + "https://*.googletagmanager.com", "fastly-insights.com", "*.fastly-insights.com", "*.ethicalads.io", @@ -106,14 +109,16 @@ def includeme(config): "img-src": [ SELF, config.registry.settings["camo.url"], - "www.google-analytics.com", + "https://*.google-analytics.com", + "https://*.googletagmanager.com", "*.fastly-insights.com", "*.ethicalads.io", ], "script-src": [ SELF, - "www.googletagmanager.com", - "www.google-analytics.com", + "https://*.googletagmanager.com", + "https://www.google-analytics.com", # Remove when disabling UA + "https://ssl.google-analytics.com", # Remove when disabling UA "*.fastly-insights.com", "*.ethicalads.io", # Hash for v1.4.0 of ethicalads.min.js
diff --git a/tests/unit/test_csp.py b/tests/unit/test_csp.py --- a/tests/unit/test_csp.py +++ b/tests/unit/test_csp.py @@ -217,6 +217,9 @@ def test_includeme(): "connect-src": [ "'self'", "https://api.github.com/repos/", + "https://*.google-analytics.com", + "https://*.analytics.google.com", + "https://*.googletagmanager.com", "fastly-insights.com", "*.fastly-insights.com", "*.ethicalads.io", @@ -232,14 +235,16 @@ def test_includeme(): "img-src": [ "'self'", "camo.url.value", - "www.google-analytics.com", + "https://*.google-analytics.com", + "https://*.googletagmanager.com", "*.fastly-insights.com", "*.ethicalads.io", ], "script-src": [ "'self'", - "www.googletagmanager.com", - "www.google-analytics.com", + "https://*.googletagmanager.com", + "https://www.google-analytics.com", + "https://ssl.google-analytics.com", "*.fastly-insights.com", "*.ethicalads.io", "'sha256-U3hKDidudIaxBDEzwGJApJgPEf2mWk6cfMWghrAa6i0='",
Migrate to Google Analytics 4 Universal Analytics will be going away. > [Google Analytics 4](https://support.google.com/analytics/answer/10089681) is our next-generation measurement solution, and it's replacing Universal Analytics. On July 1, 2023, standard Universal Analytics properties will stop processing new hits. If you still rely on Universal Analytics, we recommend that you [prepare to use Google Analytics 4](https://support.google.com/analytics/answer/10759417) going forward. From https://support.google.com/analytics/answer/11583528?hl=en Places to examine: - https://github.com/pypi/warehouse/blob/d0c6cee5b12c899a81c9fae484be714ca2f803db/warehouse/static/js/warehouse/utils/analytics.js - https://github.com/pypi/warehouse/blob/d0c6cee5b12c899a81c9fae484be714ca2f803db/warehouse/templates/base.html#L138-L140 - https://github.com/pypi/warehouse/blob/d0c6cee5b12c899a81c9fae484be714ca2f803db/warehouse/csp.py#L109 - https://github.com/pypi/warehouse/blob/d0c6cee5b12c899a81c9fae484be714ca2f803db/warehouse/csp.py#L115-L116
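The CSP portion of the eventual change (see this record's patch) amounts to allowing the GA4 hosts in the right directives. Below is a simplified, hedged sketch of that directive map; warehouse builds the full policy in `warehouse/csp.py`, and this subset omits everything unrelated to analytics:

```python
# Illustrative subset of the CSP directives GA4 needs: gtag.js loads from
# googletagmanager.com, and hits are reported to the google-analytics /
# analytics.google.com collection endpoints.
SELF = "'self'"

directives = {
    "script-src": [SELF, "https://*.googletagmanager.com"],
    "connect-src": [
        SELF,
        "https://*.google-analytics.com",
        "https://*.analytics.google.com",
        "https://*.googletagmanager.com",
    ],
    "img-src": [
        SELF,
        "https://*.google-analytics.com",
        "https://*.googletagmanager.com",
    ],
}

# Serialize into a Content-Security-Policy header value:
header = "; ".join(f"{name} {' '.join(vals)}" for name, vals in directives.items())
print(header)
```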
2023-01-12T14:39:21Z
[]
[]
pypi/warehouse
12,888
pypi__warehouse-12888
[ "8990", "3252" ]
ede4fa0a575522eb77829138f1b677d084dfca35
diff --git a/warehouse/config.py b/warehouse/config.py --- a/warehouse/config.py +++ b/warehouse/config.py @@ -176,7 +176,6 @@ def configure(settings=None): "GITHUB_TOKEN_SCANNING_META_API_URL", default="https://api.github.com/meta/public_keys/token_scanning", ) - maybe_set(settings, "warehouse.trending_table", "WAREHOUSE_TRENDING_TABLE") maybe_set(settings, "warehouse.downloads_table", "WAREHOUSE_DOWNLOADS_TABLE") maybe_set(settings, "celery.broker_url", "BROKER_URL") maybe_set(settings, "celery.result_url", "REDIS_URL") diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py --- a/warehouse/packaging/__init__.py +++ b/warehouse/packaging/__init__.py @@ -22,7 +22,6 @@ from warehouse.packaging.tasks import ( compute_2fa_mandate, compute_2fa_metrics, - compute_trending, update_description_html, ) @@ -110,11 +109,6 @@ def includeme(config): # Add a periodic task to generate 2FA metrics config.add_periodic_task(crontab(minute="*/5"), compute_2fa_metrics) - # Add a periodic task to compute trending once a day, assuming we have - # been configured to be able to access BigQuery. - if config.get_settings().get("warehouse.trending_table"): - config.add_periodic_task(crontab(minute=0, hour=3), compute_trending) - # TODO: restore this # if config.get_settings().get("warehouse.release_files_table"): # config.add_periodic_task(crontab(minute=0), sync_bigquery_release_files) diff --git a/warehouse/packaging/models.py b/warehouse/packaging/models.py --- a/warehouse/packaging/models.py +++ b/warehouse/packaging/models.py @@ -29,7 +29,6 @@ DateTime, Enum, FetchedValue, - Float, ForeignKey, Index, Integer, @@ -196,7 +195,6 @@ class Project(SitemapMixin, TwoFactorRequireable, HasEvents, db.Model): upload_limit = Column(Integer, nullable=True) total_size_limit = Column(BigInteger, nullable=True) last_serial = Column(Integer, nullable=False, server_default=sql.text("0")) - zscore = Column(Float, nullable=True) total_size = Column(BigInteger, server_default=sql.text("0")) organization = orm.relationship( diff --git a/warehouse/packaging/search.py b/warehouse/packaging/search.py --- a/warehouse/packaging/search.py +++ b/warehouse/packaging/search.py @@ -12,7 +12,7 @@ import packaging.version -from elasticsearch_dsl import Date, Document, Float, Keyword, Text, analyzer +from elasticsearch_dsl import Date, Document, Keyword, Text, analyzer from warehouse.search.utils import doc_type @@ -49,7 +49,6 @@ class Project(Document): platform = Keyword() created = Date() classifiers = Keyword(multi=True) - zscore = Float() @classmethod def from_db(cls, release): @@ -72,7 +71,6 @@ def from_db(cls, release): obj["platform"] = release.platform obj["created"] = release.created obj["classifiers"] = release.classifiers - obj["zscore"] = release.zscore return obj diff --git a/warehouse/packaging/tasks.py b/warehouse/packaging/tasks.py --- a/warehouse/packaging/tasks.py +++ b/warehouse/packaging/tasks.py @@ -21,7 +21,6 @@ from warehouse import tasks from warehouse.accounts.models import User, WebAuthn -from warehouse.cache.origin import IOriginCache from warehouse.email import send_two_factor_mandate_email from warehouse.metrics import IMetricsService from warehouse.packaging.models import Description, File, Project, Release, Role @@ -191,87 +190,6 @@ def compute_2fa_metrics(request): ) [email protected](ignore_result=True, acks_late=True) -def compute_trending(request): - bq = request.find_service(name="gcloud.bigquery") - query = bq.query( - """ SELECT project, - IF( - STDDEV(downloads) > 0, - 
(todays_downloads - AVG(downloads))/STDDEV(downloads), - NULL - ) as zscore - FROM ( - SELECT project, - date, - downloads, - FIRST_VALUE(downloads) OVER ( - PARTITION BY project - ORDER BY DATE DESC - ROWS BETWEEN UNBOUNDED PRECEDING - AND UNBOUNDED FOLLOWING - ) as todays_downloads - FROM ( - SELECT file.project as project, - DATE(timestamp) AS date, - COUNT(*) as downloads - FROM `{table}` - WHERE _TABLE_SUFFIX BETWEEN - FORMAT_DATE( - "%Y%m%d", - DATE_ADD(CURRENT_DATE(), INTERVAL -31 day)) - AND - FORMAT_DATE( - "%Y%m%d", - DATE_ADD(CURRENT_DATE(), INTERVAL -1 day)) - GROUP BY file.project, date - ) - ) - GROUP BY project, todays_downloads - HAVING SUM(downloads) >= 5000 - ORDER BY zscore DESC - """.format( - table=request.registry.settings["warehouse.trending_table"] - ) - ) - - zscores = {} - for row in query.result(): - row = dict(row) - zscores[row["project"]] = row["zscore"] - - # We're going to "reset" all of our zscores to a steady state where they - # are all equal to ``None``. The next query will then set any that have a - # value back to the expected value. - ( - request.db.query(Project) - .filter(Project.zscore != None) # noqa - .update({Project.zscore: None}) - ) - - # We need to convert the normalized name that we get out of BigQuery and - # turn it into the primary key of the Project object and construct a list - # of primary key: new zscore, including a default of None if the item isn't - # in the result set. - query = request.db.query(Project.id, Project.normalized_name).all() - to_update = [ - {"id": id, "zscore": zscores[normalized_name]} - for id, normalized_name in query - if normalized_name in zscores - ] - - # Reflect out updated ZScores into the database. - request.db.bulk_update_mappings(Project, to_update) - - # Trigger a purge of the trending surrogate key. 
- try: - cacher = request.find_service(IOriginCache) - except LookupError: - pass - else: - cacher.purge(["trending"]) - - @tasks.task(ignore_result=True, acks_late=True) def update_description_html(request): renderer_version = readme.renderer_version() diff --git a/warehouse/search/tasks.py b/warehouse/search/tasks.py --- a/warehouse/search/tasks.py +++ b/warehouse/search/tasks.py @@ -93,7 +93,6 @@ def _project_docs(db, project_name=None): classifiers, Project.normalized_name, Project.name, - Project.zscore, ) .select_from(releases_list) .join(Release, Release.id == releases_list.c.id) diff --git a/warehouse/views.py b/warehouse/views.py --- a/warehouse/views.py +++ b/warehouse/views.py @@ -38,7 +38,6 @@ view_defaults, ) from sqlalchemy import func -from sqlalchemy.orm import aliased, joinedload from sqlalchemy.sql import exists, expression from trove_classifiers import deprecated_classifiers, sorted_classifiers @@ -210,48 +209,12 @@ def opensearchxml(request): 1 * 60 * 60, # 1 hour stale_while_revalidate=10 * 60, # 10 minutes stale_if_error=1 * 24 * 60 * 60, # 1 day - keys=["all-projects", "trending"], + keys=["all-projects"], ) ], has_translations=True, ) def index(request): - project_ids = [ - r[0] - for r in ( - request.db.query(Project.id) - .order_by(Project.zscore.desc().nullslast(), func.random()) - .limit(5) - .all() - ) - ] - release_a = aliased( - Release, - request.db.query(Release) - .distinct(Release.project_id) - .filter(Release.project_id.in_(project_ids)) - .order_by( - Release.project_id, - Release.is_prerelease.nullslast(), - Release._pypi_ordering.desc(), - ) - .subquery(), - ) - trending_projects = ( - request.db.query(release_a) - .options(joinedload(release_a.project)) - .order_by(func.array_idx(project_ids, release_a.project_id)) - .all() - ) - - latest_releases = ( - request.db.query(Release) - .options(joinedload(Release.project)) - .order_by(Release.created.desc()) - .limit(5) - .all() - ) - counts = dict( request.db.query(RowCount.table_name, RowCount.count) .filter( @@ -268,8 +231,6 @@ def index(request): ) return { - "latest_releases": latest_releases, - "trending_projects": trending_projects, "num_projects": counts.get(Project.__tablename__, 0), "num_releases": counts.get(Release.__tablename__, 0), "num_files": counts.get(File.__tablename__, 0),
diff --git a/tests/unit/packaging/test_init.py b/tests/unit/packaging/test_init.py --- a/tests/unit/packaging/test_init.py +++ b/tests/unit/packaging/test_init.py @@ -22,15 +22,13 @@ from warehouse.packaging.models import File, Project, Release, Role from warehouse.packaging.tasks import ( # sync_bigquery_release_files, compute_2fa_mandate, - compute_trending, update_description_html, ) [email protected]("with_trending", [True, False]) @pytest.mark.parametrize("with_bq_sync", [True, False]) @pytest.mark.parametrize("with_2fa_mandate", [True, False]) -def test_includeme(monkeypatch, with_trending, with_bq_sync, with_2fa_mandate): +def test_includeme(monkeypatch, with_bq_sync, with_2fa_mandate): storage_class = pretend.stub( create_service=pretend.call_recorder(lambda *a, **kw: pretend.stub()) ) @@ -40,8 +38,6 @@ def key_factory(keystring, iterate_on=None): monkeypatch.setattr(packaging, "key_factory", key_factory) settings = dict() - if with_trending: - settings["warehouse.trending_table"] = "foobar" if with_bq_sync: settings["warehouse.release_files_table"] = "fizzbuzz" if with_2fa_mandate: @@ -128,12 +124,6 @@ def key_factory(keystring, iterate_on=None): # ) pass - if with_trending: - assert ( - pretend.call(crontab(minute=0, hour=3), compute_trending) - in config.add_periodic_task.calls - ) - if with_2fa_mandate: assert ( pretend.call(crontab(minute=0, hour=3), compute_2fa_mandate) diff --git a/tests/unit/packaging/test_search.py b/tests/unit/packaging/test_search.py --- a/tests/unit/packaging/test_search.py +++ b/tests/unit/packaging/test_search.py @@ -35,7 +35,6 @@ def test_build_search(): platform="any platform", created=datetime.datetime(1956, 1, 31), classifiers=["Alpha", "Beta"], - zscore=None, ) obj = Project.from_db(release) @@ -55,4 +54,3 @@ def test_build_search(): assert obj["platform"] == "any platform" assert obj["created"] == datetime.datetime(1956, 1, 31) assert obj["classifiers"] == ["Alpha", "Beta"] - assert obj["zscore"] is None diff --git a/tests/unit/packaging/test_tasks.py b/tests/unit/packaging/test_tasks.py --- a/tests/unit/packaging/test_tasks.py +++ b/tests/unit/packaging/test_tasks.py @@ -15,18 +15,16 @@ import pretend import pytest -from google.cloud.bigquery import Row, SchemaField +from google.cloud.bigquery import SchemaField from wtforms import Field, Form, StringField import warehouse.packaging.tasks from warehouse.accounts.models import WebAuthn -from warehouse.cache.origin import IOriginCache -from warehouse.packaging.models import Description, Project +from warehouse.packaging.models import Description from warehouse.packaging.tasks import ( compute_2fa_mandate, compute_2fa_metrics, - compute_trending, sync_bigquery_release_files, update_bigquery_release_files, update_description_html, @@ -45,94 +43,6 @@ ) -class TestComputeTrending: - @pytest.mark.parametrize("with_purges", [True, False]) - def test_computes_trending(self, db_request, with_purges): - projects = [ - ProjectFactory.create(zscore=1 if not i else None) for i in range(3) - ] - - results = iter( - [ - Row((projects[1].normalized_name, 2), {"project": 0, "zscore": 1}), - Row((projects[2].normalized_name, -1), {"project": 0, "zscore": 1}), - ] - ) - query = pretend.stub(result=pretend.call_recorder(lambda *a, **kw: results)) - bigquery = pretend.stub(query=pretend.call_recorder(lambda q: query)) - - cacher = pretend.stub(purge=pretend.call_recorder(lambda keys: None)) - - def find_service(iface=None, name=None): - if iface is None and name == "gcloud.bigquery": - return bigquery - - if 
with_purges and issubclass(iface, IOriginCache): - return cacher - - raise LookupError - - db_request.find_service = find_service - db_request.registry.settings = { - "warehouse.trending_table": "example.pypi.downloads*" - } - - compute_trending(db_request) - - assert bigquery.query.calls == [ - pretend.call( - """ SELECT project, - IF( - STDDEV(downloads) > 0, - (todays_downloads - AVG(downloads))/STDDEV(downloads), - NULL - ) as zscore - FROM ( - SELECT project, - date, - downloads, - FIRST_VALUE(downloads) OVER ( - PARTITION BY project - ORDER BY DATE DESC - ROWS BETWEEN UNBOUNDED PRECEDING - AND UNBOUNDED FOLLOWING - ) as todays_downloads - FROM ( - SELECT file.project as project, - DATE(timestamp) AS date, - COUNT(*) as downloads - FROM `example.pypi.downloads*` - WHERE _TABLE_SUFFIX BETWEEN - FORMAT_DATE( - "%Y%m%d", - DATE_ADD(CURRENT_DATE(), INTERVAL -31 day)) - AND - FORMAT_DATE( - "%Y%m%d", - DATE_ADD(CURRENT_DATE(), INTERVAL -1 day)) - GROUP BY file.project, date - ) - ) - GROUP BY project, todays_downloads - HAVING SUM(downloads) >= 5000 - ORDER BY zscore DESC - """ - ) - ] - assert query.result.calls == [pretend.call()] - assert cacher.purge.calls == ( - [pretend.call(["trending"])] if with_purges else [] - ) - - results = dict(db_request.db.query(Project.name, Project.zscore).all()) - - assert results == { - projects[0].name: None, - projects[1].name: 2, - projects[2].name: -1, - } - - def test_update_description_html(monkeypatch, db_request): current_version = "24.0" previous_version = "23.0" diff --git a/tests/unit/test_search.py b/tests/unit/test_search.py --- a/tests/unit/test_search.py +++ b/tests/unit/test_search.py @@ -142,9 +142,7 @@ def test_mixed_quoted_query(self): }, } - @pytest.mark.parametrize( - "order,field", [("created", "created"), ("-zscore", "zscore")] - ) + @pytest.mark.parametrize("order,field", [("created", "created")]) def test_sort_order(self, order, field): es = Search() terms = "foo bar" diff --git a/tests/unit/test_views.py b/tests/unit/test_views.py --- a/tests/unit/test_views.py +++ b/tests/unit/test_views.py @@ -327,9 +327,6 @@ def test_index(self, db_request): UserFactory.create() assert index(db_request) == { - # assert that ordering is correct - "latest_releases": [release2, release1], - "trending_projects": [release2], "num_projects": 1, "num_users": 3, "num_releases": 2,
The `compute_trending` task is using the old download dataset The `compute_trending` task is currently configured to use the old `the-psf.pypi.downloads*` dataset instead of the new clustered/partitioned `the-psf.pypi.file_downloads`. https://github.com/pypa/warehouse/blob/aafc5185e57e67d43487ce4faa95913dd4573e14/warehouse/packaging/tasks.py#L25-L103 Assess libraries.io trending projects feed, consider replacing ours The [Libraries.io list of trending PyPI projects](https://libraries.io/trending?platforms=PyPI) is very different from the one on the http://pypi.org/ front page. Our list just computes a zscore of downloads. We should check their data & methods, and if their results are more interesting or better than ours, we should [hit their API](https://libraries.io/api) and use their list instead of ours on our front page. From [IRC today](http://kafka.dcpython.org/day/pypa/2018-03-14).
I've looked under the hood at the [libraries.io source code](https://github.com/librariesio/libraries.io), to try to understand how their trending list is computed. However, I'm not so familiar with Ruby or Rails, so let's see what I can find... I started by [searching for "trending"](https://github.com/librariesio/libraries.io/search?q=trending), which trimmed down the results. At first glance, there is a query from [the repository model](https://github.com/librariesio/libraries.io/blob/94878b45ccb1dde9cb74aa3698658d29a96290a2/app/models/repository.rb#L75) whose name looked promising. But the other search results from GitHub seem to reference [the `hacker_news` query](https://github.com/librariesio/libraries.io/blob/94878b45ccb1dde9cb74aa3698658d29a96290a2/app/models/repository.rb#L74) instead of the `trending` one. I'm not clear which query is used, but it seems it relies on GitHub stars? Now, looking at their API documentation, I'm not sure there is an endpoint available to expose this trending data. The [project search](https://libraries.io/api#project-search) looks interesting but seems to require a search term. Does that answer the research questions? I appreciate you looking into this, @browniebroke! @andrew is a maintainer of Libraries.io and could probably help answer our question about the endpoint availability and how we'd access trending data. @browniebroke could you try comparing the list of trending projects on the pypi.org front page with the list at https://libraries.io/trending?platforms=PyPI , and briefly assess how widely used or trending some of those projects seem to be (when you look for mentions of them on social media, on GitHub, in StackOverflow, and in new release announcements)? That might help us figure out how different the results are. https://github.com/librariesio/libraries.io/commit/aebab87a74641f80a131974b80677220e7922566
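For reference, the removed `compute_trending` task's SQL boiled down to a per-project z-score: today's downloads measured against the mean and standard deviation of the trailing 30-day window. The same statistic in plain Python (a sketch of the formula only, not the BigQuery pipeline or its >= 5000 total-downloads filter):

```python
# The trending statistic the removed task computed in BigQuery: z-score of
# today's download count against the recent daily history (today included).
from statistics import mean, stdev


def trending_zscore(daily_downloads: list[int]) -> float | None:
    """daily_downloads holds one count per day, most recent day last."""
    if len(daily_downloads) < 2:
        return None
    sigma = stdev(daily_downloads)
    if sigma == 0:
        return None  # matches the SQL's STDDEV(downloads) > 0 guard
    return (daily_downloads[-1] - mean(daily_downloads)) / sigma


print(trending_zscore([100, 120, 110, 900]))  # large positive => trending
```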
2023-01-24T20:53:00Z
[]
[]
pypi/warehouse
12,915
pypi__warehouse-12915
[ "11296" ]
e141459ae7e69be7bce20883f4714bacfc140449
diff --git a/warehouse/email/__init__.py b/warehouse/email/__init__.py --- a/warehouse/email/__init__.py +++ b/warehouse/email/__init__.py @@ -993,6 +993,13 @@ def send_oidc_provider_removed_email(request, user, project_name, provider): } +@_email("pending-oidc-provider-invalidated") +def send_pending_oidc_provider_invalidated_email(request, user, project_name): + return { + "project_name": project_name, + } + + @_email("two-factor-mandate") def send_two_factor_mandate_email(request, user): return {"username": user.username, "has_two_factor": user.has_two_factor} diff --git a/warehouse/oidc/interfaces.py b/warehouse/oidc/interfaces.py --- a/warehouse/oidc/interfaces.py +++ b/warehouse/oidc/interfaces.py @@ -12,12 +12,16 @@ from __future__ import annotations -from typing import Any, NewType +from typing import TYPE_CHECKING, Any, NewType from zope.interface import Interface +from warehouse.packaging.models import Project from warehouse.rate_limiting.interfaces import RateLimiterException +if TYPE_CHECKING: + from warehouse.oidc.models import PendingOIDCProvider # pragma: no cover + SignedClaims = NewType("SignedClaims", dict[str, Any]) @@ -32,12 +36,22 @@ def verify_jwt_signature(unverified_token: str): """ pass - def find_provider(signed_claims: SignedClaims): + def find_provider(signed_claims: SignedClaims, *, pending: bool = False): """ Given a mapping of signed claims produced by `verify_jwt_signature`, - attempt to find and return an `OIDCProvider` that matches them. + attempt to find and return either a `OIDCProvider` or `PendingOIDCProvider` + that matches them, depending on the value of `pending`. + + If no provider matches the claims, `None` is returned. + """ + pass + + def reify_pending_provider(pending_provider: PendingOIDCProvider, project: Project): + """ + Reify the given pending `PendingOIDCProvider` into an `OIDCProvider`, + adding it to the given project (presumed newly created) in the process. - If no `OIDCProvider` matches the claims, `None` is returned. + Returns the reified provider. """ pass diff --git a/warehouse/oidc/models.py b/warehouse/oidc/models.py --- a/warehouse/oidc/models.py +++ b/warehouse/oidc/models.py @@ -200,6 +200,15 @@ class PendingOIDCProvider(OIDCProviderMixin, db.Model): "polymorphic_on": OIDCProviderMixin.discriminator, } + def reify(self, session): # pragma: no cover + """ + Return an equivalent "normal" OIDC provider model for this pending provider, + deleting the pending provider in the process. + """ + + # Only concrete subclasses are constructed. 
+ return NotImplemented + class GitHubProviderMixin: """ @@ -212,6 +221,7 @@ class GitHubProviderMixin: workflow_filename = Column(String) __verifiable_claims__ = { + "sub": _check_claim_binary(str.__eq__), "repository": _check_claim_binary(str.__eq__), "repository_owner": _check_claim_binary(str.__eq__), "repository_owner_id": _check_claim_binary(str.__eq__), @@ -222,7 +232,6 @@ class GitHubProviderMixin: "actor", "actor_id", "jti", - "sub", "ref", "sha", "run_id", @@ -258,6 +267,10 @@ def repository(self): def job_workflow_ref(self): return f"{self.repository}/{self._workflow_slug}" + @property + def sub(self): + return f"repo:{self.repository}" + def __str__(self): return f"{self.workflow_filename} @ {self.repository}" @@ -292,3 +305,29 @@ class PendingGitHubProvider(GitHubProviderMixin, PendingOIDCProvider): id = Column( UUID(as_uuid=True), ForeignKey(PendingOIDCProvider.id), primary_key=True ) + + def reify(self, session): + """ + Returns a `GitHubProvider` for this `PendingGitHubProvider`, + deleting the `PendingGitHubProvider` in the process. + """ + + maybe_provider = ( + session.query(GitHubProvider) + .filter( + GitHubProvider.repository_name == self.repository_name, + GitHubProvider.repository_owner == self.repository_owner, + GitHubProvider.workflow_filename == self.workflow_filename, + ) + .one_or_none() + ) + + provider = maybe_provider or GitHubProvider( + repository_name=self.repository_name, + repository_owner=self.repository_owner, + repository_owner_id=self.repository_owner_id, + workflow_filename=self.workflow_filename, + ) + + session.delete(self) + return provider diff --git a/warehouse/oidc/services.py b/warehouse/oidc/services.py --- a/warehouse/oidc/services.py +++ b/warehouse/oidc/services.py @@ -22,7 +22,7 @@ from warehouse.metrics.interfaces import IMetricsService from warehouse.oidc.interfaces import IOIDCProviderService, SignedClaims -from warehouse.oidc.models import OIDCProvider +from warehouse.oidc.models import OIDCProvider, PendingOIDCProvider from warehouse.oidc.utils import find_provider_by_issuer @@ -65,10 +65,19 @@ def verify_jwt_signature(self, unverified_token: str) -> SignedClaims | None: except jwt.PyJWTError: return None - def find_provider(self, signed_claims: SignedClaims) -> OIDCProvider | None: + def find_provider( + self, signed_claims: SignedClaims, *, pending: bool = False + ) -> OIDCProvider | PendingOIDCProvider | None: # NOTE: We do NOT verify the claims against the provider, since this # service is for development purposes only. 
- return find_provider_by_issuer(self.db, self.issuer_url, signed_claims) + return find_provider_by_issuer( + self.db, self.issuer_url, signed_claims, pending=pending + ) + + def reify_pending_provider(self, pending_provider, project): + new_provider = pending_provider.reify(self.db) + project.oidc_providers.append(new_provider) + return new_provider @implementer(IOIDCProviderService) @@ -248,14 +257,18 @@ def verify_jwt_signature(self, unverified_token: str) -> SignedClaims | None: sentry_sdk.capture_message(f"JWT verify raised generic error: {e}") return None - def find_provider(self, signed_claims: SignedClaims) -> OIDCProvider | None: + def find_provider( + self, signed_claims: SignedClaims, *, pending: bool = False + ) -> OIDCProvider | PendingOIDCProvider | None: metrics_tags = [f"provider:{self.provider}"] self.metrics.increment( "warehouse.oidc.find_provider.attempt", tags=metrics_tags, ) - provider = find_provider_by_issuer(self.db, self.issuer_url, signed_claims) + provider = find_provider_by_issuer( + self.db, self.issuer_url, signed_claims, pending=pending + ) if provider is None: self.metrics.increment( "warehouse.oidc.find_provider.provider_not_found", @@ -277,6 +290,11 @@ def find_provider(self, signed_claims: SignedClaims) -> OIDCProvider | None: return provider + def reify_pending_provider(self, pending_provider, project): + new_provider = pending_provider.reify(self.db) + project.oidc_providers.append(new_provider) + return new_provider + class OIDCProviderServiceFactory: def __init__(self, provider, issuer_url, service_class=OIDCProviderService): diff --git a/warehouse/oidc/utils.py b/warehouse/oidc/utils.py --- a/warehouse/oidc/utils.py +++ b/warehouse/oidc/utils.py @@ -14,21 +14,21 @@ from sqlalchemy.sql.expression import func, literal -from warehouse.oidc.interfaces import SignedClaims -from warehouse.oidc.models import GitHubProvider, OIDCProvider +from warehouse.oidc.models import GitHubProvider, PendingGitHubProvider GITHUB_OIDC_ISSUER_URL = "https://token.actions.githubusercontent.com" OIDC_ISSUER_URLS = {GITHUB_OIDC_ISSUER_URL} -def find_provider_by_issuer( - session, issuer_url: str, signed_claims: SignedClaims -) -> OIDCProvider | None: +def find_provider_by_issuer(session, issuer_url, signed_claims, *, pending=False): """ Given an OIDC issuer URL and a dictionary of claims that have been verified - for a token from that OIDC issuer, retrieve a concrete `OIDCProvider` registered - to one or more projects. + for a token from that OIDC issuer, retrieve either an `OIDCProvider` registered + to one or more projects or a `PendingOIDCProvider`, varying with the + `pending` parameter. + + Returns `None` if no provider can be found. """ if issuer_url not in OIDC_ISSUER_URLS: @@ -36,17 +36,19 @@ def find_provider_by_issuer( # claims for an issuer that we don't recognize and support. return None - # This is the ugly part: OIDCProvider is polymorphic, and retrieving - # the correct provider requires us to query based on provider-specific - # claims. + # This is the ugly part: OIDCProvider and PendingOIDCProvider are both + # polymorphic, and retrieving the correct provider requires us to query + # based on provider-specific claims. 
if issuer_url == GITHUB_OIDC_ISSUER_URL: repository = signed_claims["repository"] repository_owner, repository_name = repository.split("/", 1) workflow_prefix = f"{repository}/.github/workflows/" workflow_ref = signed_claims["job_workflow_ref"].removeprefix(workflow_prefix) + provider_cls = GitHubProvider if not pending else PendingGitHubProvider + return ( - session.query(GitHubProvider) + session.query(provider_cls) .filter_by( repository_name=repository_name, repository_owner=repository_owner, @@ -54,7 +56,7 @@ def find_provider_by_issuer( ) .filter( literal(workflow_ref).like( - func.concat(GitHubProvider.workflow_filename, "%") + func.concat(provider_cls.workflow_filename, "%") ) ) .one_or_none() diff --git a/warehouse/oidc/views.py b/warehouse/oidc/views.py --- a/warehouse/oidc/views.py +++ b/warehouse/oidc/views.py @@ -12,13 +12,23 @@ import time +from pydantic import BaseModel, StrictStr, ValidationError from pyramid.view import view_config +from sqlalchemy import func from warehouse.admin.flags import AdminFlagValue +from warehouse.email import send_pending_oidc_provider_invalidated_email from warehouse.events.tags import EventTag from warehouse.macaroons import caveats from warehouse.macaroons.interfaces import IMacaroonService from warehouse.oidc.interfaces import IOIDCProviderService +from warehouse.oidc.models import PendingOIDCProvider +from warehouse.packaging.interfaces import IProjectService +from warehouse.packaging.models import ProjectFactory + + +class TokenPayload(BaseModel): + token: StrictStr @view_config( @@ -47,34 +57,10 @@ def _invalid(errors): ) try: - body = request.json_body - except ValueError: - return _invalid( - errors=[{"code": "invalid-json", "description": "missing JSON body"}] - ) - - # `json_body` can return any valid top-level JSON type, so we have - # to make sure we're actually receiving a dictionary. - if not isinstance(body, dict): - return _invalid( - errors=[ - { - "code": "invalid-payload", - "description": "payload is not a JSON dictionary", - } - ] - ) - - unverified_jwt = body.get("token") - if unverified_jwt is None: - return _invalid( - errors=[{"code": "invalid-token", "description": "token is missing"}] - ) - - if not isinstance(unverified_jwt, str): - return _invalid( - errors=[{"code": "invalid-token", "description": "token is not a string"}] - ) + payload = TokenPayload.parse_raw(request.body) + unverified_jwt = payload.token + except ValidationError as exc: + return _invalid(errors=[{"code": "invalid-payload", "description": str(exc)}]) # For the time being, GitHub is our only OIDC provider. # In the future, this should locate the correct service based on an @@ -88,7 +74,57 @@ def _invalid(errors): ] ) - provider = oidc_service.find_provider(claims) + # First, try to find a pending provider. + pending_provider = oidc_service.find_provider(claims, pending=True) + if pending_provider is not None: + factory = ProjectFactory(request) + + # If the project already exists, this pending provider is no longer + # valid and needs to be removed. + # NOTE: This is mostly a sanity check, since we dispose of invalidated + # pending providers below. + if pending_provider.project_name in factory: + request.db.delete(pending_provider) + return _invalid( + errors=[ + { + "code": "invalid-pending-provider", + "description": "valid token, but project already exists", + } + ] + ) + + # Create the new project, and reify the pending provider against it. 
+ project_service = request.find_service(IProjectService) + new_project = project_service.create_project( + pending_provider.project_name, pending_provider.added_by + ) + oidc_service.reify_pending_provider(pending_provider, new_project) + + # There might be other pending providers for the same project name, + # which we've now invalidated by creating the project. These would + # be disposed of on use, but we explicitly dispose of them here while + # also sending emails to their owners. + stale_pending_providers = ( + request.db.query(PendingOIDCProvider) + .filter( + func.normalize_pep426_name(PendingOIDCProvider.project_name) + == func.normalize_pep426_name(pending_provider.project_name) + ) + .all() + ) + for stale_provider in stale_pending_providers: + send_pending_oidc_provider_invalidated_email( + request, + stale_provider.added_by, + project_name=stale_provider.project_name, + ) + request.db.delete(stale_provider) + + # We either don't have a pending OIDC provider, or we *did* + # have one and we've just converted it. Either way, look for a full provider + # to actually do the macaroon minting with. + provider = oidc_service.find_provider(claims, pending=False) if not provider: return _invalid( errors=[
diff --git a/tests/common/db/oidc.py b/tests/common/db/oidc.py --- a/tests/common/db/oidc.py +++ b/tests/common/db/oidc.py @@ -12,7 +12,7 @@ import factory -from warehouse.oidc.models import GitHubProvider +from warehouse.oidc.models import GitHubProvider, PendingGitHubProvider from .base import WarehouseFactory @@ -22,7 +22,19 @@ class Meta: model = GitHubProvider id = factory.Faker("uuid4", cast_to=None) - repository_name = "foo" - repository_owner = "bar" - repository_owner_id = 123 + repository_name = factory.Faker("pystr", max_chars=12) + repository_owner = factory.Faker("pystr", max_chars=12) + repository_owner_id = factory.Faker("pystr", max_chars=12) + workflow_filename = "example.yml" + + +class PendingGitHubProviderFactory(WarehouseFactory): + class Meta: + model = PendingGitHubProvider + + id = factory.Faker("uuid4", cast_to=None) + project_name = "fake-nonexistent-project" + repository_name = factory.Faker("pystr", max_chars=12) + repository_owner = factory.Faker("pystr", max_chars=12) + repository_owner_id = factory.Faker("pystr", max_chars=12) workflow_filename = "example.yml" diff --git a/tests/conftest.py b/tests/conftest.py --- a/tests/conftest.py +++ b/tests/conftest.py @@ -46,7 +46,11 @@ from warehouse.email import services as email_services from warehouse.email.interfaces import IEmailSender from warehouse.macaroons import services as macaroon_services +from warehouse.macaroons.interfaces import IMacaroonService from warehouse.metrics import IMetricsService +from warehouse.oidc import services as oidc_services +from warehouse.oidc.interfaces import IOIDCProviderService +from warehouse.oidc.utils import GITHUB_OIDC_ISSUER_URL from warehouse.organizations import services as organization_services from warehouse.organizations.interfaces import IOrganizationService from warehouse.packaging import services as packaging_services @@ -136,6 +140,8 @@ def pyramid_services( token_service, user_service, project_service, + oidc_service, + macaroon_service, ): services = _Services() @@ -149,6 +155,8 @@ def pyramid_services( services.register_service(token_service, ITokenService, None, name="email") services.register_service(user_service, IUserService, None, name="") services.register_service(project_service, IProjectService, None, name="") + services.register_service(oidc_service, IOIDCProviderService, None, name="github") + services.register_service(macaroon_service, IMacaroonService, None, name="") return services @@ -318,6 +326,18 @@ def project_service(db_session, remote_addr): return packaging_services.ProjectService(db_session, remote_addr) [email protected] +def oidc_service(db_session): + # We pretend to be a verifier for GitHub OIDC JWTs, for the purposes of testing. 
+ return oidc_services.NullOIDCProviderService( + db_session, + pretend.stub(), + GITHUB_OIDC_ISSUER_URL, + pretend.stub(), + pretend.stub(), + ) + + @pytest.fixture def macaroon_service(db_session): return macaroon_services.DatabaseMacaroonService(db_session) diff --git a/tests/unit/email/test_init.py b/tests/unit/email/test_init.py --- a/tests/unit/email/test_init.py +++ b/tests/unit/email/test_init.py @@ -5464,6 +5464,92 @@ def test_recovery_code_emails( class TestOIDCProviderEmails: + @pytest.mark.parametrize( + "fn, template_name", + [ + ( + email.send_pending_oidc_provider_invalidated_email, + "pending-oidc-provider-invalidated", + ), + ], + ) + def test_pending_oidc_provider_emails( + self, pyramid_request, pyramid_config, monkeypatch, fn, template_name + ): + stub_user = pretend.stub( + id="id", + username="username", + name="", + email="[email protected]", + primary_email=pretend.stub(email="[email protected]", verified=True), + ) + subject_renderer = pyramid_config.testing_add_renderer( + f"email/{ template_name }/subject.txt" + ) + subject_renderer.string_response = "Email Subject" + body_renderer = pyramid_config.testing_add_renderer( + f"email/{ template_name }/body.txt" + ) + body_renderer.string_response = "Email Body" + html_renderer = pyramid_config.testing_add_renderer( + f"email/{ template_name }/body.html" + ) + html_renderer.string_response = "Email HTML Body" + + send_email = pretend.stub( + delay=pretend.call_recorder(lambda *args, **kwargs: None) + ) + pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email) + monkeypatch.setattr(email, "send_email", send_email) + + pyramid_request.db = pretend.stub( + query=lambda a: pretend.stub( + filter=lambda *a: pretend.stub( + one=lambda: pretend.stub(user_id=stub_user.id) + ) + ), + ) + pyramid_request.user = stub_user + pyramid_request.registry.settings = {"mail.sender": "[email protected]"} + + project_name = "test_project" + result = fn( + pyramid_request, + stub_user, + project_name=project_name, + ) + + assert result == { + "project_name": project_name, + } + subject_renderer.assert_() + body_renderer.assert_(project_name=project_name) + html_renderer.assert_(project_name=project_name) + assert pyramid_request.task.calls == [pretend.call(send_email)] + assert send_email.delay.calls == [ + pretend.call( + f"{stub_user.username} <{stub_user.email}>", + { + "subject": "Email Subject", + "body_text": "Email Body", + "body_html": ( + "<html>\n<head></head>\n" + "<body><p>Email HTML Body</p></body>\n</html>\n" + ), + }, + { + "tag": "account:email:sent", + "user_id": stub_user.id, + "additional": { + "from_": "[email protected]", + "to": stub_user.email, + "subject": "Email Subject", + "redact_ip": False, + }, + }, + ) + ] + @pytest.mark.parametrize( "fn, template_name", [ diff --git a/tests/unit/oidc/test_models.py b/tests/unit/oidc/test_models.py --- a/tests/unit/oidc/test_models.py +++ b/tests/unit/oidc/test_models.py @@ -13,6 +13,7 @@ import pretend import pytest +from tests.common.db.oidc import GitHubProviderFactory, PendingGitHubProviderFactory from warehouse.oidc import models @@ -118,10 +119,13 @@ def test_github_provider_missing_claims(self, monkeypatch): claim_name: "fake" for claim_name in models.GitHubProvider.all_known_claims() } - signed_claims.pop("repository") + # Pop the first signed claim, so that it's the first one to fail. 
+ signed_claims.pop("sub") + assert "sub" not in signed_claims + assert provider.__verifiable_claims__ assert not provider.verify_claims(signed_claims=signed_claims) assert sentry_sdk.capture_message.calls == [ - pretend.call("JWT for GitHubProvider is missing claim: repository") + pretend.call("JWT for GitHubProvider is missing claim: sub") ] def test_github_provider_verifies(self, monkeypatch): @@ -203,3 +207,44 @@ def test_github_provider_job_workflow_ref(self, claim, ref, valid): check = models.GitHubProvider.__verifiable_claims__["job_workflow_ref"] assert check(provider.job_workflow_ref, claim, {"ref": ref}) is valid + + +class TestPendingGitHubProvider: + def test_reify_does_not_exist_yet(self, db_request): + pending_provider = PendingGitHubProviderFactory.create() + assert ( + db_request.db.query(models.GitHubProvider) + .filter_by( + repository_name=pending_provider.repository_name, + repository_owner=pending_provider.repository_owner, + repository_owner_id=pending_provider.repository_owner_id, + workflow_filename=pending_provider.workflow_filename, + ) + .one_or_none() + is None + ) + provider = pending_provider.reify(db_request.db) + + # If an OIDC provider for this pending provider does not already exist, + # a new one is created and the pending provider is marked for deletion. + assert isinstance(provider, models.GitHubProvider) + assert pending_provider in db_request.db.deleted + assert provider.repository_name == pending_provider.repository_name + assert provider.repository_owner == pending_provider.repository_owner + assert provider.repository_owner_id == pending_provider.repository_owner_id + assert provider.workflow_filename == pending_provider.workflow_filename + + def test_reify_already_exists(self, db_request): + existing_provider = GitHubProviderFactory.create() + pending_provider = PendingGitHubProviderFactory.create( + repository_name=existing_provider.repository_name, + repository_owner=existing_provider.repository_owner, + repository_owner_id=existing_provider.repository_owner_id, + workflow_filename=existing_provider.workflow_filename, + ) + provider = pending_provider.reify(db_request.db) + + # If an OIDC provider for this pending provider already exists, + # it is returned and the pending provider is marked for deletion. 
+ assert existing_provider == provider + assert pending_provider in db_request.db.deleted diff --git a/tests/unit/oidc/test_services.py b/tests/unit/oidc/test_services.py --- a/tests/unit/oidc/test_services.py +++ b/tests/unit/oidc/test_services.py @@ -16,6 +16,7 @@ from jwt import PyJWK, PyJWTError from zope.interface.verify import verifyClass +from tests.common.db.oidc import GitHubProviderFactory, PendingGitHubProviderFactory from warehouse.oidc import interfaces, services @@ -136,7 +137,7 @@ def test_find_provider(self, monkeypatch): token = pretend.stub() provider = pretend.stub(verify_claims=pretend.call_recorder(lambda c: True)) - find_provider_by_issuer = pretend.call_recorder(lambda *a: provider) + find_provider_by_issuer = pretend.call_recorder(lambda *a, **kw: provider) monkeypatch.setattr( services, "find_provider_by_issuer", find_provider_by_issuer ) @@ -164,7 +165,7 @@ def test_find_provider_issuer_lookup_fails(self, monkeypatch): ), ) - find_provider_by_issuer = pretend.call_recorder(lambda *a: None) + find_provider_by_issuer = pretend.call_recorder(lambda *a, **kw: None) monkeypatch.setattr( services, "find_provider_by_issuer", find_provider_by_issuer ) @@ -194,7 +195,7 @@ def test_find_provider_verify_claims_fails(self, monkeypatch): ) provider = pretend.stub(verify_claims=pretend.call_recorder(lambda c: False)) - find_provider_by_issuer = pretend.call_recorder(lambda *a: provider) + find_provider_by_issuer = pretend.call_recorder(lambda *a, **kw: provider) monkeypatch.setattr( services, "find_provider_by_issuer", find_provider_by_issuer ) @@ -592,6 +593,27 @@ def test_get_key_for_token(self, monkeypatch): assert service._get_key.calls == [pretend.call("fake-key-id")] assert services.jwt.get_unverified_header.calls == [pretend.call(token)] + def test_reify_provider(self, monkeypatch): + service = services.OIDCProviderService( + session=pretend.stub(), + provider="example", + issuer_url="https://example.com", + cache_url="rediss://fake.example.com", + metrics=pretend.stub(), + ) + + provider = pretend.stub() + pending_provider = pretend.stub( + reify=pretend.call_recorder(lambda *a: provider) + ) + project = pretend.stub( + oidc_providers=[], + ) + + assert service.reify_pending_provider(pending_provider, project) == provider + assert pending_provider.reify.calls == [pretend.call(service.db)] + assert project.oidc_providers == [provider] + class TestNullOIDCProviderService: def test_interface_matches(self): @@ -706,9 +728,109 @@ def test_find_provider(self, monkeypatch): ) provider = pretend.stub(verify_claims=pretend.call_recorder(lambda c: True)) - find_provider_by_issuer = pretend.call_recorder(lambda *a: provider) + find_provider_by_issuer = pretend.call_recorder(lambda *a, **kw: provider) monkeypatch.setattr( services, "find_provider_by_issuer", find_provider_by_issuer ) assert service.find_provider(claims) == provider + + def test_find_provider_full_pending(self, oidc_service): + pending_provider = PendingGitHubProviderFactory.create( + project_name="does-not-exist", + repository_name="bar", + repository_owner="foo", + repository_owner_id="123", + workflow_filename="example.yml", + ) + + claims = { + "jti": "6e67b1cb-2b8d-4be5-91cb-757edb2ec970", + "sub": "repo:foo/bar", + "aud": "pypi", + "ref": "fake", + "sha": "fake", + "repository": "foo/bar", + "repository_owner": "foo", + "repository_owner_id": "123", + "run_id": "fake", + "run_number": "fake", + "run_attempt": "1", + "repository_id": "fake", + "actor_id": "fake", + "actor": "foo", + "workflow": "fake", + 
"head_ref": "fake", + "base_ref": "fake", + "event_name": "fake", + "ref_type": "fake", + "environment": "fake", + "job_workflow_ref": "foo/bar/.github/workflows/example.yml@fake", + "iss": "https://token.actions.githubusercontent.com", + "nbf": 1650663265, + "exp": 1650664165, + "iat": 1650663865, + } + + expected_pending_provider = oidc_service.find_provider(claims, pending=True) + assert expected_pending_provider == pending_provider + + def test_find_provider_full(self, oidc_service): + provider = GitHubProviderFactory.create( + repository_name="bar", + repository_owner="foo", + repository_owner_id="123", + workflow_filename="example.yml", + ) + + claims = { + "jti": "6e67b1cb-2b8d-4be5-91cb-757edb2ec970", + "sub": "repo:foo/bar", + "aud": "pypi", + "ref": "fake", + "sha": "fake", + "repository": "foo/bar", + "repository_owner": "foo", + "repository_owner_id": "123", + "run_id": "fake", + "run_number": "fake", + "run_attempt": "1", + "repository_id": "fake", + "actor_id": "fake", + "actor": "foo", + "workflow": "fake", + "head_ref": "fake", + "base_ref": "fake", + "event_name": "fake", + "ref_type": "fake", + "environment": "fake", + "job_workflow_ref": "foo/bar/.github/workflows/example.yml@fake", + "iss": "https://token.actions.githubusercontent.com", + "nbf": 1650663265, + "exp": 1650664165, + "iat": 1650663865, + } + + expected_provider = oidc_service.find_provider(claims, pending=False) + assert expected_provider == provider + + def test_reify_provider(self): + service = services.NullOIDCProviderService( + session=pretend.stub(), + provider="example", + issuer_url="https://example.com", + cache_url="rediss://fake.example.com", + metrics=pretend.stub(), + ) + + provider = pretend.stub() + pending_provider = pretend.stub( + reify=pretend.call_recorder(lambda *a: provider) + ) + project = pretend.stub( + oidc_providers=[], + ) + + assert service.reify_pending_provider(pending_provider, project) == provider + assert pending_provider.reify.calls == [pretend.call(service.db)] + assert project.oidc_providers == [provider] diff --git a/tests/unit/oidc/test_views.py b/tests/unit/oidc/test_views.py --- a/tests/unit/oidc/test_views.py +++ b/tests/unit/oidc/test_views.py @@ -10,9 +10,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import json + import pretend import pytest +from tests.common.db.accounts import UserFactory +from tests.common.db.oidc import PendingGitHubProviderFactory +from tests.common.db.packaging import ProjectFactory from warehouse.events.tags import EventTag from warehouse.macaroons import caveats from warehouse.macaroons.interfaces import IMacaroonService @@ -40,26 +45,6 @@ def test_mint_token_from_oidc_not_enabled(registry, admin): } -def test_mint_token_from_oidc_invalid_json(): - class Request: - def __init__(self): - self.response = pretend.stub(status=None) - self.registry = pretend.stub(settings={"warehouse.oidc.enabled": True}) - self.flags = pretend.stub(enabled=lambda *a: False) - - @property - def json_body(self): - raise ValueError - - req = Request() - resp = views.mint_token_from_oidc(req) - assert req.response.status == 422 - assert resp == { - "message": "Token request failed", - "errors": [{"code": "invalid-json", "description": "missing JSON body"}], - } - - @pytest.mark.parametrize( "body", [ @@ -69,6 +54,14 @@ def json_body(self): 12345, 3.14, None, + {}, + {"token": None}, + {"wrongkey": ""}, + {"token": 3.14}, + {"token": 0}, + {"token": [""]}, + {"token": []}, + {"token": {}}, ], ) def test_mint_token_from_oidc_invalid_payload(body): @@ -79,69 +72,19 @@ def __init__(self): self.flags = pretend.stub(enabled=lambda *a: False) @property - def json_body(self): - return body + def body(self): + return json.dumps(body) req = Request() resp = views.mint_token_from_oidc(req) - assert req.response.status == 422 - assert resp == { - "message": "Token request failed", - "errors": [ - { - "code": "invalid-payload", - "description": "payload is not a JSON dictionary", - } - ], - } - [email protected]( - "body", - [ - {}, - {"token": None}, - {"wrongkey": ""}, - ], -) -def test_mint_token_from_oidc_missing_token(body): - request = pretend.stub( - response=pretend.stub(status=None), - json_body=body, - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - flags=pretend.stub(enabled=lambda *a: False), - ) - resp = views.mint_token_from_oidc(request) - assert request.response.status == 422 - assert resp == { - "message": "Token request failed", - "errors": [{"code": "invalid-token", "description": "token is missing"}], - } - - [email protected]( - "body", - [ - {"token": 3.14}, - {"token": 0}, - {"token": [""]}, - {"token": []}, - {"token": {}}, - ], -) -def test_mint_token_from_oidc_nonstring_token(body): - request = pretend.stub( - response=pretend.stub(status=None), - json_body=body, - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - flags=pretend.stub(enabled=lambda *a: False), - ) - resp = views.mint_token_from_oidc(request) - assert request.response.status == 422 - assert resp == { - "message": "Token request failed", - "errors": [{"code": "invalid-token", "description": "token is not a string"}], - } + assert req.response.status == 422 + assert resp["message"] == "Token request failed" + assert isinstance(resp["errors"], list) + for err in resp["errors"]: + assert isinstance(err, dict) + assert err["code"] == "invalid-payload" + assert isinstance(err["description"], str) def test_mint_token_from_oidc_provider_verify_jwt_signature_fails(): @@ -150,7 +93,7 @@ def test_mint_token_from_oidc_provider_verify_jwt_signature_fails(): ) request = pretend.stub( response=pretend.stub(status=None), - json_body={"token": "faketoken"}, + body=json.dumps({"token": "faketoken"}), find_service=pretend.call_recorder(lambda cls, **kw: oidc_service), 
registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), flags=pretend.stub(enabled=lambda *a: False), @@ -178,11 +121,11 @@ def test_mint_token_from_oidc_provider_lookup_fails(): claims = pretend.stub() oidc_service = pretend.stub( verify_jwt_signature=pretend.call_recorder(lambda token: claims), - find_provider=pretend.call_recorder(lambda claims: None), + find_provider=pretend.call_recorder(lambda claims, **kw: None), ) request = pretend.stub( response=pretend.stub(status=None), - json_body={"token": "faketoken"}, + body=json.dumps({"token": "faketoken"}), find_service=pretend.call_recorder(lambda cls, **kw: oidc_service), registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), flags=pretend.stub(enabled=lambda *a: False), @@ -204,10 +147,163 @@ def test_mint_token_from_oidc_provider_lookup_fails(): pretend.call(IOIDCProviderService, name="github") ] assert oidc_service.verify_jwt_signature.calls == [pretend.call("faketoken")] - assert oidc_service.find_provider.calls == [pretend.call(claims)] + assert oidc_service.find_provider.calls == [ + pretend.call(claims, pending=True), + pretend.call(claims, pending=False), + ] + + +def test_mint_token_from_oidc_pending_provider_project_already_exists(db_request): + project = ProjectFactory.create() + pending_provider = PendingGitHubProviderFactory.create(project_name=project.name) + + db_request.registry.settings = {"warehouse.oidc.enabled": True} + db_request.flags.enabled = lambda f: False + db_request.body = json.dumps({"token": "faketoken"}) + claims = pretend.stub() + oidc_service = pretend.stub( + verify_jwt_signature=pretend.call_recorder(lambda token: claims), + find_provider=pretend.call_recorder( + lambda claims, pending=False: pending_provider + ), + ) + db_request.find_service = pretend.call_recorder(lambda *a, **kw: oidc_service) -def test_mint_token_from_oidc_ok(monkeypatch): + resp = views.mint_token_from_oidc(db_request) + assert db_request.response.status_code == 422 + assert resp == { + "message": "Token request failed", + "errors": [ + { + "code": "invalid-pending-provider", + "description": "valid token, but project already exists", + } + ], + } + + assert oidc_service.verify_jwt_signature.calls == [pretend.call("faketoken")] + assert oidc_service.find_provider.calls == [pretend.call(claims, pending=True)] + assert db_request.find_service.calls == [ + pretend.call(IOIDCProviderService, name="github") + ] + + +def test_mint_token_from_oidc_pending_provider_ok( + db_request, +): + user = UserFactory.create() + PendingGitHubProviderFactory.create( + project_name="does-not-exist", + added_by=user, + repository_name="bar", + repository_owner="foo", + repository_owner_id="123", + workflow_filename="example.yml", + ) + + db_request.registry.settings = {"warehouse.oidc.enabled": True} + db_request.flags.enabled = lambda f: False + db_request.body = json.dumps( + { + "token": ( + "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJqdGkiOiI2ZTY3YjFjYi0yYjhkLTRi" + "ZTUtOTFjYi03NTdlZGIyZWM5NzAiLCJzdWIiOiJyZXBvOmZvby9iYXIiLCJhdWQiOiJwe" + "XBpIiwicmVmIjoiZmFrZSIsInNoYSI6ImZha2UiLCJyZXBvc2l0b3J5IjoiZm9vL2Jhci" + "IsInJlcG9zaXRvcnlfb3duZXIiOiJmb28iLCJyZXBvc2l0b3J5X293bmVyX2lkIjoiMTI" + "zIiwicnVuX2lkIjoiZmFrZSIsInJ1bl9udW1iZXIiOiJmYWtlIiwicnVuX2F0dGVtcHQi" + "OiIxIiwicmVwb3NpdG9yeV9pZCI6ImZha2UiLCJhY3Rvcl9pZCI6ImZha2UiLCJhY3Rvc" + "iI6ImZvbyIsIndvcmtmbG93IjoiZmFrZSIsImhlYWRfcmVmIjoiZmFrZSIsImJhc2Vfcm" + "VmIjoiZmFrZSIsImV2ZW50X25hbWUiOiJmYWtlIiwicmVmX3R5cGUiOiJmYWtlIiwiZW5" + 
"2aXJvbm1lbnQiOiJmYWtlIiwiam9iX3dvcmtmbG93X3JlZiI6ImZvby9iYXIvLmdpdGh1" + "Yi93b3JrZmxvd3MvZXhhbXBsZS55bWxAZmFrZSIsImlzcyI6Imh0dHBzOi8vdG9rZW4uY" + "WN0aW9ucy5naXRodWJ1c2VyY29udGVudC5jb20iLCJuYmYiOjE2NTA2NjMyNjUsImV4cC" + "I6MTY1MDY2NDE2NSwiaWF0IjoxNjUwNjYzODY1fQ.f-FMv5FF5sdxAWeUilYDt9NoE7Et" + "0vbdNhK32c2oC-E" + ) + } + ) + db_request.remote_addr = "0.0.0.0" + + resp = views.mint_token_from_oidc(db_request) + assert resp["success"] + assert resp["token"].startswith("pypi-") + + +def test_mint_token_from_pending_oidc_provider_invalidates_others( + monkeypatch, db_request +): + time = pretend.stub(time=pretend.call_recorder(lambda: 0)) + monkeypatch.setattr(views, "time", time) + + user = UserFactory.create() + PendingGitHubProviderFactory.create( + project_name="does-not-exist", + added_by=user, + repository_name="bar", + repository_owner="foo", + repository_owner_id="123", + workflow_filename="example.yml", + ) + + # Create some other pending providers for the same nonexistent project, + # each of which should be invalidated. Invalidations occur based on the + # normalized project name. + emailed_users = [] + for project_name in ["does_not_exist", "does-not-exist", "dOeS-NoT-ExISt"]: + user = UserFactory.create() + PendingGitHubProviderFactory.create( + project_name=project_name, + added_by=user, + ) + emailed_users.append(user) + + send_pending_oidc_provider_invalidated_email = pretend.call_recorder( + lambda *a, **kw: None + ) + monkeypatch.setattr( + views, + "send_pending_oidc_provider_invalidated_email", + send_pending_oidc_provider_invalidated_email, + ) + + db_request.registry.settings = {"warehouse.oidc.enabled": True} + db_request.flags.enabled = lambda f: False + db_request.body = json.dumps( + { + "token": ( + "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJqdGkiOiI2ZTY3YjFjYi0yYjhkLTRi" + "ZTUtOTFjYi03NTdlZGIyZWM5NzAiLCJzdWIiOiJyZXBvOmZvby9iYXIiLCJhdWQiOiJwe" + "XBpIiwicmVmIjoiZmFrZSIsInNoYSI6ImZha2UiLCJyZXBvc2l0b3J5IjoiZm9vL2Jhci" + "IsInJlcG9zaXRvcnlfb3duZXIiOiJmb28iLCJyZXBvc2l0b3J5X293bmVyX2lkIjoiMTI" + "zIiwicnVuX2lkIjoiZmFrZSIsInJ1bl9udW1iZXIiOiJmYWtlIiwicnVuX2F0dGVtcHQi" + "OiIxIiwicmVwb3NpdG9yeV9pZCI6ImZha2UiLCJhY3Rvcl9pZCI6ImZha2UiLCJhY3Rvc" + "iI6ImZvbyIsIndvcmtmbG93IjoiZmFrZSIsImhlYWRfcmVmIjoiZmFrZSIsImJhc2Vfcm" + "VmIjoiZmFrZSIsImV2ZW50X25hbWUiOiJmYWtlIiwicmVmX3R5cGUiOiJmYWtlIiwiZW5" + "2aXJvbm1lbnQiOiJmYWtlIiwiam9iX3dvcmtmbG93X3JlZiI6ImZvby9iYXIvLmdpdGh1" + "Yi93b3JrZmxvd3MvZXhhbXBsZS55bWxAZmFrZSIsImlzcyI6Imh0dHBzOi8vdG9rZW4uY" + "WN0aW9ucy5naXRodWJ1c2VyY29udGVudC5jb20iLCJuYmYiOjE2NTA2NjMyNjUsImV4cC" + "I6MTY1MDY2NDE2NSwiaWF0IjoxNjUwNjYzODY1fQ.f-FMv5FF5sdxAWeUilYDt9NoE7Et" + "0vbdNhK32c2oC-E" + ) + } + ) + db_request.remote_addr = "0.0.0.0" + + resp = views.mint_token_from_oidc(db_request) + assert resp["success"] + assert resp["token"].startswith("pypi-") + + # We should have sent one invalidation email for each pending provider that + # was invalidated by the minting operation. 
+ assert send_pending_oidc_provider_invalidated_email.calls == [ + pretend.call(db_request, emailed_users[0], project_name="does_not_exist"), + pretend.call(db_request, emailed_users[1], project_name="does-not-exist"), + pretend.call(db_request, emailed_users[2], project_name="dOeS-NoT-ExISt"), + ] + + +def test_mint_token_from_oidc_no_pending_provider_ok(monkeypatch): time = pretend.stub(time=pretend.call_recorder(lambda: 0)) monkeypatch.setattr(views, "time", time) @@ -227,7 +323,9 @@ def test_mint_token_from_oidc_ok(monkeypatch): claims = pretend.stub() oidc_service = pretend.stub( verify_jwt_signature=pretend.call_recorder(lambda token: claims), - find_provider=pretend.call_recorder(lambda claims: provider), + find_provider=pretend.call_recorder( + lambda claims, pending=False: provider if not pending else None + ), ) db_macaroon = pretend.stub(description="fakemacaroon") @@ -246,7 +344,7 @@ def find_service(iface, **kw): request = pretend.stub( response=pretend.stub(status=None), - json_body={"token": "faketoken"}, + body=json.dumps({"token": "faketoken"}), find_service=find_service, domain="fakedomain", remote_addr="0.0.0.0", @@ -261,7 +359,10 @@ def find_service(iface, **kw): } assert oidc_service.verify_jwt_signature.calls == [pretend.call("faketoken")] - assert oidc_service.find_provider.calls == [pretend.call(claims)] + assert oidc_service.find_provider.calls == [ + pretend.call(claims, pending=True), + pretend.call(claims, pending=False), + ] assert macaroon_service.create_macaroon.calls == [ pretend.call( "fakedomain",
"De novo" project creation via API tokens/OIDC There's currently a little bit of friction around bootstrapping API tokens on a new PyPI project: 1. Prepare a package for a new project 2. Upload the first version of the project to PyPI using a user-scoped API token (not ideal for long-term actions) or user credentials (shouldn't be used in CI) 3. Log into PyPI and create a project-scoped token for the new project 4. Use the new project-scoped token for all future releases A similar problem will happen with OIDC: we currently require a project to exist before an OIDC publisher can be registered against it, meaning that a user will have to upload at least one release with a user-scoped token or credentials before they can switch to a credential-less workflow. Some potential solutions: 1. Allow users to pre-create projects on PyPI before uploading any releases. - Pros: Solves the "de novo" problem above. - Cons: Malicious users can abuse this as a low-friction way to squat names. 2. Allow users to pre-register OIDC publishers; on use, the project would be created for the first time. - Pros: Solves the "de novo" problem *and* minimizes squatting potential. - Cons: Complicates the OIDC data mode: pre-registering a publisher means either pre-creating the project (same problem as (1)) or creating a "symbolic" link to a nonexistent project (another column). We'd have to be careful about invalidating a publisher if someone else races/beats it to creating a project, so that someone can't quietly takeover a project's releases via OIDC. 3. Something else?
From discussions, I'm currently leaning towards solution (1). Here's the proposed flow:

1. An already registered PyPI user who meets certain requirements can "pre-register" a project name via the Warehouse web UI
   * Some potential requirements: the account must be over a week old, must have a verified email, must have 2FA, etc.
2. Pre-registering is done in a new view + form, where the user specifies the name they'd like to pre-register.
3. If the name is available and passes PyPI's normal name restriction checks (e.g. for typosquatting), an empty project is created, with the user in the Owner role.
4. Simultaneously, a timer begins: if the user does not upload a release to the project within the reservation timespan, the project is deleted entirely and released for anyone to use. This has the added effect of invalidating any tokens that the user might have minted (but not used) for the project, since the project ID (and user role) will no longer match.
   * Additionally, a user who allows the reservation period to expire *could* be temporarily locked out of other reservations for a short period, like 72 hours.

Additional considerations:

* No user should be able to pre-register more than a small handful of project names at once. 5 is probably a reasonable restriction, since it'll allow people uploading a handful of new projects to do them all at once.
* We could consider adding a "cooldown" period (with exponential backoff) for expired registration periods, to prevent people from squatting a name by cycling it between two accounts. This may be more work than it's worth, however, since namesquatting on PyPI is already easier than that.

Working on this this week. Just some notes:

We'll probably want the button for this under the project management view (`/manage/projects`), i.e. somewhere on the top of this page:

<img width="1147" alt="Screenshot 2022-11-03 at 5 26 42 PM" src="https://user-images.githubusercontent.com/3059210/199837501-da0564e6-704f-40b4-90ea-a23ca4e058e0.png">

...probably right below "Your projects," with language like "Reserve a project name." The button will be inactive if the user isn't eligible to reserve a project name (per the restrictions above).

Clicking that button should, in turn, produce a form modal (not a separate page, since it's pretty much a single input) that includes (1) the name to reserve, and (2) a confirmation checkbox ("I understand that this is a temporary reservation, and that I need to upload a package in the next XX hours to make it permanent"), plus a submit button. Pressing the button should produce a success or error flash, as appropriate.

Current sketch:

![image](https://user-images.githubusercontent.com/3059210/200940872-1a573c2d-9f23-4665-8aa0-9b4f502705ff.png)

As-is, I've dropped the plan for the modal: it doesn't add anything to the flow, and makes it less accessible.

Hi @woodruffw, it's great that this effort is getting revived. Have you seen the previous discussion and my previous suggestions @ https://github.com/pypi/warehouse/issues/6378#issuecomment-576080170 + https://github.com/pypi/warehouse/issues/6378#issuecomment-802097436. It seems that people in the discussion agreed that it was a good idea, and it seems to partially align with what you're describing here. So I'm mostly just checking to see if you know about it and whether you're planning to incorporate those ideas.

I did, thanks! And thanks for cross-referencing as well.
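The eligibility requirements in step 1 of the flow above reduce to a simple predicate. A sketch, assuming a hypothetical `user` object whose attribute names (`date_joined`, `has_verified_email`, `has_two_factor`, `active_reservations`) are illustrative rather than Warehouse's actual model API; the thresholds come from the requirements listed in the flow:

```python
from datetime import datetime, timedelta, timezone

# Hypothetical eligibility gate for the "Reserve a project name" button.
# Thresholds follow the discussion above: account over a week old, verified
# email, 2FA, and at most a small handful (5) of simultaneous reservations.
MIN_ACCOUNT_AGE = timedelta(days=7)
MAX_ACTIVE_RESERVATIONS = 5


def can_reserve_project_name(user) -> bool:
    account_age = datetime.now(timezone.utc) - user.date_joined
    return (
        account_age >= MIN_ACCOUNT_AGE
        and user.has_verified_email
        and user.has_two_factor
        and len(user.active_reservations) < MAX_ACTIVE_RESERVATIONS
    )
```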
I think the ultimate plan here is close to what you commented in that issue: we aren't planning single-use tokens, and instead we're going to make "reservations" part of the Warehouse UI. I'll have some initial work pushed up soon to demonstrate the rough idea there.

Demonstrating the "reserved" chip on a project:

![Screenshot 2022-11-10 at 5 14 30 PM](https://user-images.githubusercontent.com/3059210/201217315-81f42d10-d10d-4b8e-82d5-d1219dcd7d19.png)

@woodruffw that sounds roughly like what I had in mind originally (except for the expiration part). Do you think the `Reserve` button could also automatically make a token so that it's not a two-step process but a single-step one?

I think we want to limit this to use by OIDC publishers only (see #11272).

> I think we want to limit this to use by OIDC publishers only (see #11272).

I might not be understanding, but I think there's a chicken-and-egg problem with restricting this to OIDC publishers: we don't know which publishers have been registered against PyPI projects until the project already exists, so we can't temporarily register the project itself via OIDC.

The exception to that would be some kind of "TOFU" scheme, where the user could submit their OIDC token to an endpoint with the project name they'd like to reserve, which would then reserve it and produce a scoped API token. I *think* that would work, but it would need to be slightly different than the endpoint we've already built in #11272 (since it would need to specify the project being created). But even then we'd need to figure out how to add the owner role to the temporary project, since we don't know (ahead of time) which PyPI users are associated with which OIDC publishers.

I'm thinking that there would be an 'add a publisher' view that has a free-form field for the project name, limited to projects that don't exist -- so `OIDCPublisher`s would be slightly less coupled to the `Project` than they currently are. The user creates the publisher, which doesn't necessarily reserve the project name, but lives in a 'pending' state until it enables them to publish successfully, creating a strong relationship between the `OIDCPublisher` and `Project` like we have now. As a result, we don't need 'reservations' or expirations, because if another user (or publisher) successfully publishes it before they do, we can just deactivate/disable all other publishers that are in a 'pending' state for that project name. So, _sort of_ TOFU, but more like TOFU with a heads-up?

I think that scheme makes sense! I'll need to think some more about how to shoehorn this into the existing `user <-> projects <-> providers` relationship. Currently every `OIDCProvider` has a many-many `Projects` relationship, which might be a little cumbersome to refactor.

Some things I'm not sure about yet:

* How should we handle multiple users adding the same publisher for different uncreated project names? For example, users Alice and Bob might both register `release.yml @ foo/bar`, but Alice registers it for `cool-project-1` while Bob registers it for `cool-project-2`. Should we honor both registrations, or treat them as first-come-first-serve on just the uniqueness of the OIDC token's claims?
* How do we track which user should ultimately get added to the project? We know the user(s) doing the reservation at the "add a publisher" stage, but when we're consuming the OIDC token all we know is what's in the token.
This might require us to add some kind of `User` relationship for each `OIDCProvider`, which again complicates the data model (since the point of OIDC providers is to be entirely independent of user identities). Some random ideas (not fully fleshed out): * We could have a `PendingOIDCProvider` model hierarchy, roughly mirroring the existing `OIDCProvider` hierarchy. This would have a project *name* instead of a many-many project relationship, and we could "reify" the pending provider into a full `OIDCProvider` once a compatible OIDC token is seen. * Pros: Minimally invasive, requires minimal refactoring of the current OIDC handling * Cons: Structurally complex, requires significant duplication against the existing `OIDCProvider` hierarchy * We could add some kind of `pending_creation: str | None` field to `OIDCProvider`, along with a constraint of `pending_creation XOR projects`. `pending_creation` would then be the name of the project we'll create once a compatible OIDC token is seen. * Pros: Still relatively unintrusive, requires fewer changes than a separate hierarchy. * Cons: Still requires us to track the user somehow, also unclear how to handle "duplicate" requests from multiple users > Should we honor both registrations, or treat them as first-come-first serve on just the uniqueness of the OIDC token's claims? I think they should both be honored? This wouldn't be any different than setting the same publisher for multiple existing projects now. > How do we track which user should ultimately get added to the project? This should be the user that created the publisher -- probably something we want to track anyways. > Some random ideas (not fully fleshed out): I think either approach could work. The `PendingOIDCProvider` seems duplicative, but possibly also safer, as we could never accidentally forget to check the `pending_creation` field and allow a 'pending' publisher to actually publish something they shouldn't have been able to publish, or forget to clean up 'duplicate' publishers, etc. > This should be the user that created the publisher -- probably something we want to track anyways. Makes sense! Yeah, we should definitely be tracking this. > I think either approach could work. The `PendingOIDCProvider` seems duplicative, but possibly also safer, as we could never accidentally forget to check the `pending_creation` field and allow a 'pending' publisher to actually publish something they shouldn't have been able to publish, or forget to clean up 'duplicate' publishers, etc. Yeah, I think I'm leaning towards the `PendingOIDCProvider` approach. Thinking about it more, the duplication here won't be _too_ bad -- we can turn the actual claim verification parts (which are shared) into mixin functionality. Now that the initial models are merged, I'm going to start work on the corresponding routes and views. My current plan: a new "Publishing" link under the "Your account" ToC, which goes to a new view just for pending OIDC publishers. This view will closely mirror the extant `/project/{project}/settings/publishing/` view, but will be at the account layer instead. Edit: Something like this: <img width="375" alt="Screenshot 2022-11-28 at 2 57 43 PM" src="https://user-images.githubusercontent.com/3059210/204369135-be661906-92b0-464a-b682-abac531edca6.png"> Just to fully enumerate the behavior here, this is how API token exchange with a "pending" OIDC provider will work: 1. PyPI's token endpoint receives the OIDC JWT; 2. The JWT's signature is verified; 3. 
The `PendingOIDCProvider` for the JWT's claims is retrieved.

Then, if the pending provider's project does **not** already exist, it is created, a temporary macaroon is minted for it, and the `PendingOIDCProvider` is "reified" into a full-fledged `OIDCProvider`. If the pending provider's project **does** already exist, then the `PendingOIDCProvider` is invalidated, removed, and no temporary macaroon is returned.

From discussion with @di, there are some additional constraints we'll need to impose to limit the potential for abuse from "pending" OIDC providers:

1. In addition to the user restrictions described above (needs 2FA, verified email, etc.), we should limit the number of "pending" OIDC providers that any given user should be able to register at once. This will limit any individual user's ability to "grief" other users.
2. Each "pending" OIDC provider's "provider-unique" state needs to remain unique. For GitHub, that means that we need to retain the `UniqueConstraint` on `repository_name, repository_owner, workflow_filename`. This is more a consequence of the data model than a security property: we need to map an OIDC JWT to its pending provider, and then to exactly one project for creation.
3. Finally, as a consequence of (2): we need to present users with a big warning when pending OIDC provider registration fails, *if* the failure reason is that someone else has already registered a pending OIDC provider with the same "provider-unique" state. This will prevent users from accidentally activating a pending OIDC provider that they didn't actually register, giving some other user control over a newly created package.

Finally: we should **not** give pending OIDC providers an expiration window: doing so would allow an attacker to place a reservation as soon as the legitimate user's reservation has expired, leading to a confusing situation where the legitimate user *believes* they're creating a project under their own account but are actually creating it under the attacker's account.

Edit: With these constraints, the `PendingOIDCProvider.project_name` does **not** have to be unique: multiple users can attempt to register for the same nonexistent PyPI project name using *different* OIDC providers, and the first one to actually submit an OIDC token is the winner.

Edit: These constraints have been added to #12646.

#12646 is the penultimate component of this: it adds the views, routes, forms, etc. needed to register and manage "pending" OIDC providers. I'll do a follow-up PR for the final part, which will be validating OIDC JWTs using those "pending" providers similarly to the current functionality for "normal" OIDC providers.

I'll open the final PR to close this out after #12646 is merged. Here's an enumeration of its behavior when `mint-token` is used with an OIDC token linked to a pending OIDC provider (a code sketch of this flow appears at the end of this thread):

1. If the pending provider's project name already exists, fail (since the project has already been created, possibly by an unrelated party).
   * Remove the pending OIDC provider in the process, since it is now invalid.
2. If the pending provider's project name does **not** exist, create the project.
   * We need to do this up-front in order to create an appropriate Macaroon caveat for its token.
   * Convert the pending provider into a full provider, and attach it to the newly created project.
   * Perform the token minting as normal, now that we have both a project and a full OIDC provider.

Oh, and some miscellaneous cleanup/usability tasks:
1. The provider pages should be inverted to list all existing providers, followed by the form(s) to add new providers.
2. *All* OIDC providers (both pending and normal) should be listed under `manage/account/publishing`, not just pending ones.
3. Possibly unifying the creation of both provider types under `manage/account/publishing`, with the correct one selected contextually (dropdown for normal providers, freeform text input for pending providers).
4. The admin app should provide a list of pending providers, at minimum, and possibly some functionality for administering them.

> If the pending provider's project name already exists, fail (since the project has already been created, possibly by an unrelated party).
> Remove the pending OIDC provider in the process, since it is now invalid.

This should probably just happen at upload time, when the project is created by the unrelated third party (and we should probably notify the owner of the pending provider).

> This should probably just happen at upload time, when the project is created by the unrelated third party (and we should probably notify the owner of the pending provider)

Makes sense!

> I think we want to limit this to use by OIDC publishers only (see #11272).

What about non-OIDC publishers? I think it'd make sense to still support my original idea for those. Probably as a disconnected effort, though.

Status update: starting work on those follow-on items now.
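Tying the thread together, here is a condensed sketch of the mint-token flow enumerated above, reconstructed from the test patch earlier in this record (which exercises `find_provider(claims, pending=True)`, the `invalid-pending-provider` error, and `reify_pending_provider`). The helper callables passed as parameters are hypothetical stand-ins for the real view's internals, not Warehouse's actual API:

```python
import json


# A condensed sketch of the pending-provider mint flow described above.
# project_exists, create_project, invalidate_other_pending, and mint_macaroon
# are hypothetical stand-ins supplied by the caller.
def mint_token_from_oidc_sketch(
    request,
    oidc_service,
    project_exists,
    create_project,
    invalidate_other_pending,
    mint_macaroon,
):
    token = json.loads(request.body)["token"]
    claims = oidc_service.verify_jwt_signature(token)

    # Pending providers are consulted first (pending=True), then we fall back
    # to "normal" providers (pending=False), matching the calls in the tests.
    pending = oidc_service.find_provider(claims, pending=True)
    if pending is not None:
        if project_exists(pending.project_name):
            # Another party created the project first: the token is valid,
            # but it cannot be honored, and no macaroon is minted.
            request.response.status = 422
            return {
                "message": "Token request failed",
                "errors": [
                    {
                        "code": "invalid-pending-provider",
                        "description": "valid token, but project already exists",
                    }
                ],
            }
        # Create the project up front so a project-scoped macaroon caveat can
        # be attached, then reify the pending provider into a full one.
        project = create_project(pending.project_name, owner=pending.added_by)
        provider = oidc_service.reify_pending_provider(pending, project)
        # Other pending providers registered for the same *normalized* project
        # name are now moot; invalidate them and email their owners.
        invalidate_other_pending(project.normalized_name)
    else:
        provider = oidc_service.find_provider(claims, pending=False)

    # (Handling for signature/lookup failures is elided from this sketch.)
    return {"success": True, "token": mint_macaroon(provider)}
```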
2023-01-31T22:59:39Z
[]
[]
pypi/warehouse
13,060
pypi__warehouse-13060
[ "13053" ]
31422a2e98c350cfe1b714c7f287975ba194b356
diff --git a/warehouse/admin/views/projects.py b/warehouse/admin/views/projects.py --- a/warehouse/admin/views/projects.py +++ b/warehouse/admin/views/projects.py @@ -129,6 +129,7 @@ def project_detail(project, request): "releases": releases, "maintainers": maintainers, "journal": journal, + "oidc_publishers": project.oidc_publishers, "ONE_MB": ONE_MB, "MAX_FILESIZE": MAX_FILESIZE, "ONE_GB": ONE_GB,
diff --git a/tests/unit/admin/views/test_projects.py b/tests/unit/admin/views/test_projects.py --- a/tests/unit/admin/views/test_projects.py +++ b/tests/unit/admin/views/test_projects.py @@ -19,6 +19,7 @@ from pyramid.httpexceptions import HTTPBadRequest, HTTPMovedPermanently, HTTPSeeOther +from tests.common.db.oidc import GitHubPublisherFactory from warehouse.admin.views import projects as views from warehouse.packaging.models import Project, Role from warehouse.search.tasks import reindex_project @@ -84,6 +85,7 @@ def test_gets_project(self, db_request): [RoleFactory(project=project) for _ in range(5)], key=lambda x: (x.role_name, x.user.username), ) + oidc_publishers = [GitHubPublisherFactory(projects=[project]) for _ in range(5)] db_request.matchdict["project_name"] = str(project.normalized_name) result = views.project_detail(project, db_request) @@ -92,6 +94,7 @@ def test_gets_project(self, db_request): "releases": [], "maintainers": roles, "journal": journals[:30], + "oidc_publishers": oidc_publishers, "ONE_MB": views.ONE_MB, "MAX_FILESIZE": views.MAX_FILESIZE, "MAX_PROJECT_SIZE": views.MAX_PROJECT_SIZE,
OIDC publishers should be manageable within the admin app

Breakout of #11296: PyPI's admins should be able to administer OIDC publishers (both full and "pending") from within the admin app/views.
2023-02-22T22:08:49Z
[]
[]
pypi/warehouse
13,075
pypi__warehouse-13075
[ "13071" ]
0aa20a995a2c9a4b4f4d468e14a05c0372d66dce
diff --git a/warehouse/admin/views/checks.py b/warehouse/admin/views/checks.py --- a/warehouse/admin/views/checks.py +++ b/warehouse/admin/views/checks.py @@ -86,8 +86,8 @@ def run_evaluation(request): if check.check_type == MalwareCheckType.EventHook: request.session.flash( - f"Running {check.name} on {EVALUATION_RUN_SIZE} {check.hooked_object.value}s\ -!", + f"Running {check.name} on {EVALUATION_RUN_SIZE} " + f"{check.hooked_object.value}s!", queue="success", ) request.task(backfill).delay(check.name, EVALUATION_RUN_SIZE) diff --git a/warehouse/macaroons/caveats/_core.py b/warehouse/macaroons/caveats/_core.py --- a/warehouse/macaroons/caveats/_core.py +++ b/warehouse/macaroons/caveats/_core.py @@ -15,7 +15,7 @@ import json import typing -from collections.abc import Callable, Mapping, Sequence +from collections.abc import Mapping, Sequence from dataclasses import dataclass from typing import Any, ClassVar, TypeVar @@ -111,7 +111,13 @@ def lookup(self, /, tag: int) -> type[Caveat] | None: _caveat_registry = _CaveatRegistry() -def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]: +# TODO: The return signature detected is `"Union[Type[Dataclass], DataclassProxy]"`, +# but the expectation is `Type[Dataclass]`. +# See https://github.com/pydantic/pydantic/issues/4498 but not exactly the same. +# This might not be corrected in pydantic until 2.0. +# Original signature with type hints: +# def as_caveat(*, tag: int) -> Callable[[type[T]], type[T]]: +def as_caveat(*, tag: int): def deco(cls: type[T]) -> type[T]: _caveat_registry.add(tag, typing.cast(type[Caveat], cls)) return cls
diff --git a/requirements/tests.txt b/requirements/tests.txt --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -106,66 +106,66 @@ charset-normalizer==3.0.1 \ --hash=sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7 \ --hash=sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8 # via requests -coverage==7.1.0 \ - --hash=sha256:04481245ef966fbd24ae9b9e537ce899ae584d521dfbe78f89cad003c38ca2ab \ - --hash=sha256:0c45948f613d5d18c9ec5eaa203ce06a653334cf1bd47c783a12d0dd4fd9c851 \ - --hash=sha256:10188fe543560ec4874f974b5305cd1a8bdcfa885ee00ea3a03733464c4ca265 \ - --hash=sha256:218fe982371ac7387304153ecd51205f14e9d731b34fb0568181abaf7b443ba0 \ - --hash=sha256:29571503c37f2ef2138a306d23e7270687c0efb9cab4bd8038d609b5c2393a3a \ - --hash=sha256:2a60d6513781e87047c3e630b33b4d1e89f39836dac6e069ffee28c4786715f5 \ - --hash=sha256:2bf1d5f2084c3932b56b962a683074a3692bce7cabd3aa023c987a2a8e7612f6 \ - --hash=sha256:3164d31078fa9efe406e198aecd2a02d32a62fecbdef74f76dad6a46c7e48311 \ - --hash=sha256:32df215215f3af2c1617a55dbdfb403b772d463d54d219985ac7cd3bf124cada \ - --hash=sha256:33d1ae9d4079e05ac4cc1ef9e20c648f5afabf1a92adfaf2ccf509c50b85717f \ - --hash=sha256:33ff26d0f6cc3ca8de13d14fde1ff8efe1456b53e3f0273e63cc8b3c84a063d8 \ - --hash=sha256:38da2db80cc505a611938d8624801158e409928b136c8916cd2e203970dde4dc \ - --hash=sha256:3b155caf3760408d1cb903b21e6a97ad4e2bdad43cbc265e3ce0afb8e0057e73 \ - --hash=sha256:3b946bbcd5a8231383450b195cfb58cb01cbe7f8949f5758566b881df4b33baf \ - --hash=sha256:3baf5f126f30781b5e93dbefcc8271cb2491647f8283f20ac54d12161dff080e \ - --hash=sha256:4b14d5e09c656de5038a3f9bfe5228f53439282abcab87317c9f7f1acb280352 \ - --hash=sha256:51b236e764840a6df0661b67e50697aaa0e7d4124ca95e5058fa3d7cbc240b7c \ - --hash=sha256:63ffd21aa133ff48c4dff7adcc46b7ec8b565491bfc371212122dd999812ea1c \ - --hash=sha256:6a43c7823cd7427b4ed763aa7fb63901ca8288591323b58c9cd6ec31ad910f3c \ - --hash=sha256:755e89e32376c850f826c425ece2c35a4fc266c081490eb0a841e7c1cb0d3bda \ - --hash=sha256:7a726d742816cb3a8973c8c9a97539c734b3a309345236cd533c4883dda05b8d \ - --hash=sha256:7c7c0d0827e853315c9bbd43c1162c006dd808dbbe297db7ae66cd17b07830f0 \ - --hash=sha256:7ed681b0f8e8bcbbffa58ba26fcf5dbc8f79e7997595bf071ed5430d8c08d6f3 \ - --hash=sha256:7ee5c9bb51695f80878faaa5598040dd6c9e172ddcf490382e8aedb8ec3fec8d \ - --hash=sha256:8361be1c2c073919500b6601220a6f2f98ea0b6d2fec5014c1d9cfa23dd07038 \ - --hash=sha256:8ae125d1134bf236acba8b83e74c603d1b30e207266121e76484562bc816344c \ - --hash=sha256:9817733f0d3ea91bea80de0f79ef971ae94f81ca52f9b66500c6a2fea8e4b4f8 \ - --hash=sha256:98b85dd86514d889a2e3dd22ab3c18c9d0019e696478391d86708b805f4ea0fa \ - --hash=sha256:9ccb092c9ede70b2517a57382a601619d20981f56f440eae7e4d7eaafd1d1d09 \ - --hash=sha256:9d58885215094ab4a86a6aef044e42994a2bd76a446dc59b352622655ba6621b \ - --hash=sha256:b643cb30821e7570c0aaf54feaf0bfb630b79059f85741843e9dc23f33aaca2c \ - --hash=sha256:bc7c85a150501286f8b56bd8ed3aa4093f4b88fb68c0843d21ff9656f0009d6a \ - --hash=sha256:beeb129cacea34490ffd4d6153af70509aa3cda20fdda2ea1a2be870dfec8d52 \ - --hash=sha256:c31b75ae466c053a98bf26843563b3b3517b8f37da4d47b1c582fdc703112bc3 \ - --hash=sha256:c4e4881fa9e9667afcc742f0c244d9364d197490fbc91d12ac3b5de0bf2df146 \ - --hash=sha256:c5b15ed7644ae4bee0ecf74fee95808dcc34ba6ace87e8dfbf5cb0dc20eab45a \ - --hash=sha256:d12d076582507ea460ea2a89a8c85cb558f83406c8a41dd641d7be9a32e1274f \ - --hash=sha256:d248cd4a92065a4d4543b8331660121b31c4148dd00a691bfb7a5cdc7483cfa4 \ - 
--hash=sha256:d47dd659a4ee952e90dc56c97d78132573dc5c7b09d61b416a9deef4ebe01a0c \ - --hash=sha256:d4a5a5879a939cb84959d86869132b00176197ca561c664fc21478c1eee60d75 \ - --hash=sha256:da9b41d4539eefd408c46725fb76ecba3a50a3367cafb7dea5f250d0653c1040 \ - --hash=sha256:db61a79c07331e88b9a9974815c075fbd812bc9dbc4dc44b366b5368a2936063 \ - --hash=sha256:ddb726cb861c3117a553f940372a495fe1078249ff5f8a5478c0576c7be12050 \ - --hash=sha256:ded59300d6330be27bc6cf0b74b89ada58069ced87c48eaf9344e5e84b0072f7 \ - --hash=sha256:e2617759031dae1bf183c16cef8fcfb3de7617f394c813fa5e8e46e9b82d4222 \ - --hash=sha256:e5cdbb5cafcedea04924568d990e20ce7f1945a1dd54b560f879ee2d57226912 \ - --hash=sha256:ec8e767f13be637d056f7e07e61d089e555f719b387a7070154ad80a0ff31801 \ - --hash=sha256:ef382417db92ba23dfb5864a3fc9be27ea4894e86620d342a116b243ade5d35d \ - --hash=sha256:f2cba5c6db29ce991029b5e4ac51eb36774458f0a3b8d3137241b32d1bb91f06 \ - --hash=sha256:f5b4198d85a3755d27e64c52f8c95d6333119e49fd001ae5798dac872c95e0f8 \ - --hash=sha256:ffeeb38ee4a80a30a6877c5c4c359e5498eec095878f1581453202bfacc8fbc2 +coverage==7.2.0 \ + --hash=sha256:049806ae2df69468c130f04f0fab4212c46b34ba5590296281423bb1ae379df2 \ + --hash=sha256:08e3dd256b8d3e07bb230896c8c96ec6c5dffbe5a133ba21f8be82b275b900e8 \ + --hash=sha256:0f03c229f1453b936916f68a47b3dfb5e84e7ad48e160488168a5e35115320c8 \ + --hash=sha256:171dd3aa71a49274a7e4fc26f5bc167bfae5a4421a668bc074e21a0522a0af4b \ + --hash=sha256:1856a8c4aa77eb7ca0d42c996d0ca395ecafae658c1432b9da4528c429f2575c \ + --hash=sha256:28563a35ef4a82b5bc5160a01853ce62b9fceee00760e583ffc8acf9e3413753 \ + --hash=sha256:2c15bd09fd5009f3a79c8b3682b52973df29761030b692043f9834fc780947c4 \ + --hash=sha256:2c9fffbc39dc4a6277e1525cab06c161d11ee3995bbc97543dc74fcec33e045b \ + --hash=sha256:2d7daf3da9c7e0ed742b3e6b4de6cc464552e787b8a6449d16517b31bbdaddf5 \ + --hash=sha256:32e6a730fd18b2556716039ab93278ccebbefa1af81e6aa0c8dba888cf659e6e \ + --hash=sha256:34d7211be69b215ad92298a962b2cd5a4ef4b17c7871d85e15d3d1b6dc8d8c96 \ + --hash=sha256:358d3bce1468f298b19a3e35183bdb13c06cdda029643537a0cc37e55e74e8f1 \ + --hash=sha256:3713a8ec18781fda408f0e853bf8c85963e2d3327c99a82a22e5c91baffcb934 \ + --hash=sha256:40785553d68c61e61100262b73f665024fd2bb3c6f0f8e2cd5b13e10e4df027b \ + --hash=sha256:4655ecd813f4ba44857af3e9cffd133ab409774e9d2a7d8fdaf4fdfd2941b789 \ + --hash=sha256:465ea431c3b78a87e32d7d9ea6d081a1003c43a442982375cf2c247a19971961 \ + --hash=sha256:4b8fd32f85b256fc096deeb4872aeb8137474da0c0351236f93cbedc359353d6 \ + --hash=sha256:4c1153a6156715db9d6ae8283480ae67fb67452aa693a56d7dae9ffe8f7a80da \ + --hash=sha256:577a8bc40c01ad88bb9ab1b3a1814f2f860ff5c5099827da2a3cafc5522dadea \ + --hash=sha256:59a427f8a005aa7254074719441acb25ac2c2f60c1f1026d43f846d4254c1c2f \ + --hash=sha256:5e29a64e9586194ea271048bc80c83cdd4587830110d1e07b109e6ff435e5dbc \ + --hash=sha256:74cd60fa00f46f28bd40048d6ca26bd58e9bee61d2b0eb4ec18cea13493c003f \ + --hash=sha256:7efa21611ffc91156e6f053997285c6fe88cfef3fb7533692d0692d2cb30c846 \ + --hash=sha256:7f992b32286c86c38f07a8b5c3fc88384199e82434040a729ec06b067ee0d52c \ + --hash=sha256:875b03d92ac939fbfa8ae74a35b2c468fc4f070f613d5b1692f9980099a3a210 \ + --hash=sha256:88ae5929f0ef668b582fd7cad09b5e7277f50f912183cf969b36e82a1c26e49a \ + --hash=sha256:8d5302eb84c61e758c9d68b8a2f93a398b272073a046d07da83d77b0edc8d76b \ + --hash=sha256:90e7a4cbbb7b1916937d380beb1315b12957b8e895d7d9fb032e2038ac367525 \ + --hash=sha256:9240a0335365c29c968131bdf624bb25a8a653a9c0d8c5dbfcabf80b59c1973c \ + 
--hash=sha256:932048364ff9c39030c6ba360c31bf4500036d4e15c02a2afc5a76e7623140d4 \ + --hash=sha256:93db11da6e728587e943dff8ae1b739002311f035831b6ecdb15e308224a4247 \ + --hash=sha256:971b49dbf713044c3e5f6451b39f65615d4d1c1d9a19948fa0f41b0245a98765 \ + --hash=sha256:9cc9c41aa5af16d845b53287051340c363dd03b7ef408e45eec3af52be77810d \ + --hash=sha256:9dbb21561b0e04acabe62d2c274f02df0d715e8769485353ddf3cf84727e31ce \ + --hash=sha256:a6ceeab5fca62bca072eba6865a12d881f281c74231d2990f8a398226e1a5d96 \ + --hash=sha256:ad12c74c6ce53a027f5a5ecbac9be20758a41c85425c1bbab7078441794b04ee \ + --hash=sha256:b09dd7bef59448c66e6b490cc3f3c25c14bc85d4e3c193b81a6204be8dd355de \ + --hash=sha256:bd67df6b48db18c10790635060858e2ea4109601e84a1e9bfdd92e898dc7dc79 \ + --hash=sha256:bf9e02bc3dee792b9d145af30db8686f328e781bd212fdef499db5e9e4dd8377 \ + --hash=sha256:bfa065307667f1c6e1f4c3e13f415b0925e34e56441f5fda2c84110a4a1d8bda \ + --hash=sha256:c160e34e388277f10c50dc2c7b5e78abe6d07357d9fe7fcb2f3c156713fd647e \ + --hash=sha256:c243b25051440386179591a8d5a5caff4484f92c980fb6e061b9559da7cc3f64 \ + --hash=sha256:c3c4beddee01c8125a75cde3b71be273995e2e9ec08fbc260dd206b46bb99969 \ + --hash=sha256:cd38140b56538855d3d5722c6d1b752b35237e7ea3f360047ce57f3fade82d98 \ + --hash=sha256:d7f2a7df523791e6a63b40360afa6792a11869651307031160dc10802df9a252 \ + --hash=sha256:da32526326e8da0effb452dc32a21ffad282c485a85a02aeff2393156f69c1c3 \ + --hash=sha256:dc4f9a89c82faf6254d646180b2e3aa4daf5ff75bdb2c296b9f6a6cf547e26a7 \ + --hash=sha256:f0557289260125a6c453ad5673ba79e5b6841d9a20c9e101f758bfbedf928a77 \ + --hash=sha256:f332d61fbff353e2ef0f3130a166f499c3fad3a196e7f7ae72076d41a6bfb259 \ + --hash=sha256:f3ff4205aff999164834792a3949f82435bc7c7655c849226d5836c3242d7451 \ + --hash=sha256:ffa637a2d5883298449a5434b699b22ef98dd8e2ef8a1d9e60fa9cfe79813411 # via -r requirements/tests.in factory-boy==3.2.1 \ --hash=sha256:a98d277b0c047c75eb6e4ab8508a7f81fb03d2cb21986f627913546ef7a2a55e \ --hash=sha256:eb02a7dd1b577ef606b75a253b9818e6f9eaf996d94449c9d5ebb124f90dc795 # via -r requirements/tests.in -faker==16.7.0 \ - --hash=sha256:4b98c197169e083304afd12c1ee1e5101f5c817161c3d690eb318a15b805108d \ - --hash=sha256:c522c78f2d7572724bde05de8571205e9a594f95b57e08e388e0677976ebd400 +faker==17.0.0 \ + --hash=sha256:17cf85aeb0363a3384ccd4c1f52b52ec8f414c7afaab74ae1f4c3e09a06e14de \ + --hash=sha256:21c3c6c45183308151c14f62afe59bf54ace68f663e0180973698ba2a9a3b2c4 # via factory-boy freezegun==1.2.2 \ --hash=sha256:cd22d1ba06941384410cd967d8a99d5ae2442f57dfafeff2fda5de8dc5c05446 \ @@ -179,9 +179,9 @@ iniconfig==2.0.0 \ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 # via pytest -mirakuru==2.4.2 \ - --hash=sha256:ec84d4d81b4bca96cb0e598c6b3d198a92f036a0c1223c881482c02a98508226 \ - --hash=sha256:fdb67d141cc9f7abd485a515d618daf3272c3e6ff48380749997ff8e8c5f2cb2 +mirakuru==2.5.0 \ + --hash=sha256:20c440f07e11589735e2634bcf5ab9df0e9f901a1a18e323e15e0200112499fc \ + --hash=sha256:4484f9405610886de3eb1bb1405b3ccd9b0d88b3ea4de68ee7873a873c11956b # via pytest-postgresql packaging==21.3 \ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ @@ -202,6 +202,7 @@ pretend==1.0.9 \ --hash=sha256:e389b12b7073604be67845dbe32bf8297360ad9a609b24846fe15d86e0b7dc01 # via -r requirements/tests.in psutil==5.9.4 \ + --hash=sha256:149555f59a69b33f056ba1c4eb22bb7bf24332ce631c44a319cec09f876aaeff \ 
--hash=sha256:16653106f3b59386ffe10e0bad3bb6299e169d5327d3f187614b1cb8f24cf2e1 \ --hash=sha256:3d7f9739eb435d4b1338944abe23f49584bde5395f27487d2ee25ad9a8774a62 \ --hash=sha256:3ff89f9b835100a825b14c2808a106b6fdcc4b15483141482a12c725e7f78549 \ @@ -213,7 +214,8 @@ psutil==5.9.4 \ --hash=sha256:852dd5d9f8a47169fe62fd4a971aa07859476c2ba22c2254d4a1baa4e10b95ad \ --hash=sha256:9120cd39dca5c5e1c54b59a41d205023d436799b1c8c4d3ff71af18535728e94 \ --hash=sha256:c1ca331af862803a42677c120aff8a814a804e09832f166f226bfd22b56feee8 \ - --hash=sha256:efeae04f9516907be44904cc7ce08defb6b665128992a56957abc9b61dca94b7 + --hash=sha256:efeae04f9516907be44904cc7ce08defb6b665128992a56957abc9b61dca94b7 \ + --hash=sha256:fd8522436a6ada7b4aad6638662966de0d61d241cb821239b2ae7013d41a43d4 # via mirakuru pyparsing==3.0.9 \ --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ @@ -252,17 +254,17 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via python-dateutil -soupsieve==2.3.2.post1 \ - --hash=sha256:3b2503d3c7084a42b1ebd08116e5f81aadfaea95863628c80a3b774a11b7c759 \ - --hash=sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d +soupsieve==2.4 \ + --hash=sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955 \ + --hash=sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a # via beautifulsoup4 toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f # via responses -types-toml==0.10.8.3 \ - --hash=sha256:a2286a053aea6ab6ff814659272b1d4a05d86a1dd52b807a87b23511993b46c5 \ - --hash=sha256:f37244eff4cd7eace9cb70d0bac54d3eba77973aa4ef26c271ac3d1c6503a48e +types-toml==0.10.8.5 \ + --hash=sha256:2432017febe43174af0f3c65f03116e3d3cf43e7e1406b8200e106da8cf98992 \ + --hash=sha256:bf80fce7d2d74be91148f47b88d9ae5adeb1024abef22aa2fdbabc036d6b8b3c # via responses urllib3==1.26.14 \ --hash=sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72 \ diff --git a/tests/unit/accounts/test_services.py b/tests/unit/accounts/test_services.py --- a/tests/unit/accounts/test_services.py +++ b/tests/unit/accounts/test_services.py @@ -242,7 +242,7 @@ def test_check_password_ip_rate_limited(self, user_service, metrics, remote_addr def test_check_password_invalid(self, user_service, metrics): user = UserFactory.create() user_service.hasher = pretend.stub( - verify_and_update=pretend.call_recorder(lambda l, r: (False, None)) + verify_and_update=pretend.call_recorder(lambda L, r: (False, None)) ) assert not user_service.check_password(user.id, "user password") @@ -262,7 +262,7 @@ def test_check_password_invalid(self, user_service, metrics): def test_check_password_valid(self, user_service, metrics): user = UserFactory.create() user_service.hasher = pretend.stub( - verify_and_update=pretend.call_recorder(lambda l, r: (True, None)) + verify_and_update=pretend.call_recorder(lambda L, r: (True, None)) ) assert user_service.check_password(user.id, "user password", tags=["bar"]) @@ -283,7 +283,7 @@ def test_check_password_updates(self, user_service): user = UserFactory.create() password = user.password user_service.hasher = pretend.stub( - verify_and_update=pretend.call_recorder(lambda l, r: (True, "new password")) + verify_and_update=pretend.call_recorder(lambda L, r: (True, "new password")) ) assert 
user_service.check_password(user.id, "user password") diff --git a/tests/unit/malware/test_tasks.py b/tests/unit/malware/test_tasks.py --- a/tests/unit/malware/test_tasks.py +++ b/tests/unit/malware/test_tasks.py @@ -423,12 +423,12 @@ def test_only_wiped_out(self, db_session, monkeypatch): assert request.log.error.calls == [ pretend.call( - "ExampleHookedCheck is wiped_out and cannot be synced. Please remove check \ -from codebase." + "ExampleHookedCheck is wiped_out and cannot be synced. " + "Please remove check from codebase." ), pretend.call( - "ExampleScheduledCheck is wiped_out and cannot be synced. Please remove check \ -from codebase." + "ExampleScheduledCheck is wiped_out and cannot be synced. " + "Please remove check from codebase." ), ] diff --git a/tests/unit/rate_limiting/test_core.py b/tests/unit/rate_limiting/test_core.py --- a/tests/unit/rate_limiting/test_core.py +++ b/tests/unit/rate_limiting/test_core.py @@ -119,7 +119,7 @@ def test_resets_in_expired(self, metrics): ] ) - limiter._window = pretend.stub(get_window_stats=lambda l, *a: next(stats)) + limiter._window = pretend.stub(get_window_stats=lambda L, *a: next(stats)) resets_in = limiter.resets_in("foo")
chore(deps): bump grpcio-status from 1.51.1 to 1.51.3 Bumps [grpcio-status](https://grpc.io) from 1.51.1 to 1.51.3. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=grpcio-status&package-manager=pip&previous-version=1.51.1&new-version=1.51.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
2023-02-23T20:27:31Z
[]
[]
pypi/warehouse
13,144
pypi__warehouse-13144
[ "13141" ]
28badc7b46e1643db0d5c3926c20781a81cc7343
diff --git a/warehouse/oidc/__init__.py b/warehouse/oidc/__init__.py --- a/warehouse/oidc/__init__.py +++ b/warehouse/oidc/__init__.py @@ -34,4 +34,5 @@ def includeme(config): # to simplify caching exclusion. auth = config.get_settings().get("auth.domain") - config.add_route("oidc.mint_token", "/_/oidc/github/mint-token", domain=auth) + config.add_route("oidc.audience", "/_/oidc/audience", domain=auth) + config.add_route("oidc.github.mint_token", "/_/oidc/github/mint-token", domain=auth) diff --git a/warehouse/oidc/views.py b/warehouse/oidc/views.py --- a/warehouse/oidc/views.py +++ b/warehouse/oidc/views.py @@ -13,6 +13,7 @@ import time from pydantic import BaseModel, StrictStr, ValidationError +from pyramid.response import Response from pyramid.view import view_config from sqlalchemy import func @@ -44,7 +45,26 @@ def _ratelimiters(request): @view_config( - route_name="oidc.mint_token", + route_name="oidc.audience", + require_methods=["GET"], + renderer="json", + require_csrf=False, + has_translations=False, +) +def oidc_audience(request): + oidc_enabled = request.registry.settings[ + "warehouse.oidc.enabled" + ] and not request.flags.enabled(AdminFlagValue.DISALLOW_OIDC) + + if not oidc_enabled: + return Response(status=403, json={"message": "OIDC functionality not enabled"}) + + audience = request.registry.settings["warehouse.oidc.audience"] + return {"audience": audience} + + +@view_config( + route_name="oidc.github.mint_token", require_methods=["POST"], renderer="json", require_csrf=False,
diff --git a/tests/unit/oidc/test_views.py b/tests/unit/oidc/test_views.py --- a/tests/unit/oidc/test_views.py +++ b/tests/unit/oidc/test_views.py @@ -43,6 +43,35 @@ def test_ratelimiters(): ] [email protected]( + ("registry", "admin"), [(False, False), (False, True), (True, True)] +) +def test_oidc_audience_not_enabled(registry, admin): + request = pretend.stub( + registry=pretend.stub(settings={"warehouse.oidc.enabled": registry}), + flags=pretend.stub(enabled=lambda *a: admin), + ) + + response = views.oidc_audience(request) + assert response.status_code == 403 + assert response.json == {"message": "OIDC functionality not enabled"} + + +def test_oidc_audience(): + request = pretend.stub( + registry=pretend.stub( + settings={ + "warehouse.oidc.enabled": True, + "warehouse.oidc.audience": "fakeaudience", + } + ), + flags=pretend.stub(enabled=lambda *a: False), + ) + + response = views.oidc_audience(request) + assert response == {"audience": "fakeaudience"} + + @pytest.mark.parametrize( ("registry", "admin"), [(False, False), (False, True), (True, True)] )
OIDC: Create an audience route

Creating this as a reminder for myself. The OIDC minting routes should include an `/_/oidc/audience` endpoint that just returns the OIDC audience expected by any particular Warehouse instance.

xref #12465
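Since the view added by this change is a plain, CSRF-exempt JSON `GET` endpoint, exercising it takes only a couple of lines. A minimal sketch using `requests`, with an illustrative hostname:

```python
# Fetching the expected OIDC audience from a Warehouse instance. The hostname
# below is illustrative; the path and the {"audience": ...} response shape
# come from the patch and tests above. When OIDC functionality is disabled,
# the endpoint instead returns HTTP 403 with a {"message": ...} body.
import requests

resp = requests.get("https://warehouse.example.com/_/oidc/audience", timeout=10)
resp.raise_for_status()
print(resp.json()["audience"])
```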
2023-03-07T01:52:59Z
[]
[]
pypi/warehouse
13,161
pypi__warehouse-13161
[ "12430" ]
8c13ba12eefd7b5c29801716ddcda56592e6dbee
diff --git a/warehouse/cache/origin/__init__.py b/warehouse/cache/origin/__init__.py --- a/warehouse/cache/origin/__init__.py +++ b/warehouse/cache/origin/__init__.py @@ -93,11 +93,17 @@ def wrapped(context, request): CacheKeys = collections.namedtuple("CacheKeys", ["cache", "purge"]) -def key_factory(keystring, iterate_on=None): +def key_factory(keystring, iterate_on=None, if_attr_exists=None): def generate_key(obj): if iterate_on: for itr in operator.attrgetter(iterate_on)(obj): yield keystring.format(itr=itr, obj=obj) + elif if_attr_exists: + try: + attr = operator.attrgetter(if_attr_exists)(obj) + yield keystring.format(attr=attr, obj=obj) + except AttributeError: + pass else: yield keystring.format(obj=obj) diff --git a/warehouse/organizations/views.py b/warehouse/organizations/views.py new file mode 100644 --- /dev/null +++ b/warehouse/organizations/views.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound +from pyramid.view import view_config + +from warehouse.cache.origin import origin_cache +from warehouse.organizations.models import Organization + + +@view_config( + route_name="organizations.profile", + context=Organization, + renderer="organizations/profile.html", + decorator=[ + origin_cache(1 * 24 * 60 * 60, stale_if_error=1 * 24 * 60 * 60) # 1 day each. + ], + has_translations=True, +) +def profile(organization, request): + if organization.name != request.matchdict.get("organization", organization.name): + return HTTPMovedPermanently( + request.current_route_path(organization=organization.name) + ) + + if organization.is_active: + return {"organization": organization} + raise HTTPNotFound() diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py --- a/warehouse/packaging/__init__.py +++ b/warehouse/packaging/__init__.py @@ -17,6 +17,7 @@ from warehouse.accounts.models import Email, User from warehouse.cache.origin import key_factory, receive_set from warehouse.manage.tasks import update_role_invitation_status +from warehouse.organizations.models import Organization from warehouse.packaging.interfaces import ( IDocsStorage, IFileStorage, @@ -45,6 +46,18 @@ def email_primary_receive_set(config, target, value, oldvalue, initiator): receive_set(Email.primary, config, target) [email protected]_for(Organization.name, "set") +def org_name_receive_set(config, target, value, oldvalue, initiator): + if oldvalue is not NO_VALUE: + receive_set(Organization.name, config, target) + + [email protected]_for(Organization.display_name, "set") +def org_display_name_receive_set(config, target, value, oldvalue, initiator): + if oldvalue is not NO_VALUE: + receive_set(Organization.display_name, config, target) + + def includeme(config): # Register whatever file storage backend has been configured for storing # our package files. 
@@ -91,6 +104,7 @@ def includeme(config): key_factory("project/{obj.normalized_name}"), key_factory("user/{itr.username}", iterate_on="users"), key_factory("all-projects"), + key_factory("org/{attr.normalized_name}", if_attr_exists="organization"), ], ) config.register_origin_cache_keys( @@ -100,6 +114,9 @@ def includeme(config): key_factory("project/{obj.project.normalized_name}"), key_factory("user/{itr.username}", iterate_on="project.users"), key_factory("all-projects"), + key_factory( + "org/{attr.normalized_name}", if_attr_exists="project.organization" + ), ], ) config.register_origin_cache_keys( @@ -109,11 +126,15 @@ def includeme(config): key_factory("project/{obj.project.normalized_name}"), ], ) - config.register_origin_cache_keys(User, cache_keys=["user/{obj.username}"]) + config.register_origin_cache_keys( + User, + cache_keys=["user/{obj.username}"], + ) config.register_origin_cache_keys( User.name, purge_keys=[ key_factory("user/{obj.username}"), + key_factory("org/{itr.normalized_name}", iterate_on="organizations"), key_factory("project/{itr.normalized_name}", iterate_on="projects"), ], ) @@ -124,6 +145,25 @@ def includeme(config): key_factory("project/{itr.normalized_name}", iterate_on="user.projects"), ], ) + config.register_origin_cache_keys( + Organization, cache_keys=["org/{obj.normalized_name}"] + ) + config.register_origin_cache_keys( + Organization.name, + purge_keys=[ + key_factory("user/{itr.username}", iterate_on="users"), + key_factory("org/{obj.normalized_name}"), + key_factory("project/{itr.normalized_name}", iterate_on="projects"), + ], + ) + config.register_origin_cache_keys( + Organization.display_name, + purge_keys=[ + key_factory("user/{itr.username}", iterate_on="users"), + key_factory("org/{obj.normalized_name}"), + key_factory("project/{itr.normalized_name}", iterate_on="projects"), + ], + ) config.add_periodic_task(crontab(minute="*/5"), update_description_html) config.add_periodic_task(crontab(minute="*/5"), update_role_invitation_status) diff --git a/warehouse/routes.py b/warehouse/routes.py --- a/warehouse/routes.py +++ b/warehouse/routes.py @@ -151,6 +151,13 @@ def includeme(config): traverse="/{username}", domain=warehouse, ) + config.add_route( + "organizations.profile", + "/org/{organization}/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization}", + domain=warehouse, + ) config.add_route("accounts.login", "/account/login/", domain=warehouse) config.add_route("accounts.two-factor", "/account/two-factor/", domain=warehouse) config.add_route(
diff --git a/tests/unit/cache/origin/test_init.py b/tests/unit/cache/origin/test_init.py --- a/tests/unit/cache/origin/test_init.py +++ b/tests/unit/cache/origin/test_init.py @@ -230,6 +230,62 @@ def test_iterate_on(self): assert cache_keys.cache == ["foo"] assert list(cache_keys.purge) == ["bar", "bar/biz", "bar/baz"] + def test_if_attr_exists_exists(self): + key_maker = origin.key_maker_factory( + cache_keys=["foo"], + purge_keys=[ + origin.key_factory("bar"), + origin.key_factory("bar/{attr}", if_attr_exists="foo"), + ], + ) + cache_keys = key_maker(pretend.stub(foo="bar")) + + assert isinstance(cache_keys, origin.CacheKeys) + assert cache_keys.cache == ["foo"] + assert list(cache_keys.purge) == ["bar", "bar/bar"] + + def test_if_attr_exists_nested(self): + key_maker = origin.key_maker_factory( + cache_keys=["foo"], + purge_keys=[ + origin.key_factory("bar"), + origin.key_factory("bar/{attr}", if_attr_exists="foo.bar"), + ], + ) + cache_keys = key_maker(pretend.stub(foo=pretend.stub(bar="bar"))) + + assert isinstance(cache_keys, origin.CacheKeys) + assert cache_keys.cache == ["foo"] + assert list(cache_keys.purge) == ["bar", "bar/bar"] + + def test_if_attr_exists_does_not_exist(self): + key_maker = origin.key_maker_factory( + cache_keys=["foo"], + purge_keys=[ + origin.key_factory("bar"), + origin.key_factory("bar/{attr}", if_attr_exists="foo"), + ], + ) + cache_keys = key_maker(pretend.stub()) + + assert isinstance(cache_keys, origin.CacheKeys) + assert cache_keys.cache == ["foo"] + assert list(cache_keys.purge) == ["bar"] + + def test_if_attr_exists_nested_does_not_exist(self): + key_maker = origin.key_maker_factory( + cache_keys=["foo"], + purge_keys=[ + origin.key_factory("bar"), + origin.key_factory("bar/{attr}", if_attr_exists="foo.bar"), + ], + ) + cache_keys = key_maker(pretend.stub()) + + assert isinstance(cache_keys, origin.CacheKeys) + assert cache_keys.cache == ["foo"] + assert list(cache_keys.purge) == ["bar"] + def test_register_origin_keys(monkeypatch): class Fake1: diff --git a/tests/unit/organizations/test_views.py b/tests/unit/organizations/test_views.py new file mode 100644 --- /dev/null +++ b/tests/unit/organizations/test_views.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pretend +import pytest + +from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound + +from warehouse.organizations import views + +from ...common.db.organizations import OrganizationFactory + + +class TestOrganizationProfile: + def test_redirects_name(self, db_request): + org = OrganizationFactory.create() + + if org.name.upper() != org.name: + organization_name = org.name.upper() + else: + organization_name = org.name.lower() + + db_request.current_route_path = pretend.call_recorder( + lambda organization: "/user/the-redirect/" + ) + db_request.matchdict = {"organization": organization_name} + + result = views.profile(org, db_request) + + assert isinstance(result, HTTPMovedPermanently) + assert result.headers["Location"] == "/user/the-redirect/" + assert db_request.current_route_path.calls == [ + pretend.call(organization=org.name) + ] + + def test_returns_organization(self, db_request): + org = OrganizationFactory.create() + assert views.profile(org, db_request) == {"organization": org} + + def test_4oh4_before_approval(self, db_request): + org = OrganizationFactory.create(is_active=False, is_approved=False) + + with pytest.raises(HTTPNotFound): + views.profile(org, db_request) + + org.is_active = True + assert views.profile(org, db_request) == {"organization": org} diff --git a/tests/unit/packaging/test_init.py b/tests/unit/packaging/test_init.py --- a/tests/unit/packaging/test_init.py +++ b/tests/unit/packaging/test_init.py @@ -18,6 +18,7 @@ from warehouse import packaging from warehouse.accounts.models import Email, User from warehouse.manage.tasks import update_role_invitation_status +from warehouse.organizations.models import Organization from warehouse.packaging.interfaces import ( IDocsStorage, IFileStorage, @@ -40,8 +41,10 @@ def test_includeme(monkeypatch, with_bq_sync, with_2fa_mandate): create_service=pretend.call_recorder(lambda *a, **kw: pretend.stub()) ) - def key_factory(keystring, iterate_on=None): - return pretend.call(keystring, iterate_on=iterate_on) + def key_factory(keystring, iterate_on=None, if_attr_exists=None): + return pretend.call( + keystring, iterate_on=iterate_on, if_attr_exists=if_attr_exists + ) monkeypatch.setattr(packaging, "key_factory", key_factory) settings = { @@ -92,6 +95,9 @@ def key_factory(keystring, iterate_on=None): key_factory("project/{obj.normalized_name}"), key_factory("user/{itr.username}", iterate_on="users"), key_factory("all-projects"), + key_factory( + "org/{attr.normalized_name}", if_attr_exists="organization" + ), ], ), pretend.call( @@ -101,6 +107,9 @@ def key_factory(keystring, iterate_on=None): key_factory("project/{obj.project.normalized_name}"), key_factory("user/{itr.username}", iterate_on="project.users"), key_factory("all-projects"), + key_factory( + "org/{attr.normalized_name}", if_attr_exists="project.organization" + ), ], ), pretend.call( @@ -115,6 +124,7 @@ def key_factory(keystring, iterate_on=None): User.name, purge_keys=[ key_factory("user/{obj.username}"), + key_factory("org/{itr.normalized_name}", iterate_on="organizations"), key_factory("project/{itr.normalized_name}", iterate_on="projects"), ], ), @@ -127,6 +137,23 @@ def key_factory(keystring, iterate_on=None): ), ], ), + pretend.call(Organization, cache_keys=["org/{obj.normalized_name}"]), + pretend.call( + Organization.name, + purge_keys=[ + key_factory("user/{itr.username}", iterate_on="users"), + key_factory("org/{obj.normalized_name}"), + key_factory("project/{itr.normalized_name}", iterate_on="projects"), + ], + ), + pretend.call( + 
Organization.display_name, + purge_keys=[ + key_factory("user/{itr.username}", iterate_on="users"), + key_factory("org/{obj.normalized_name}"), + key_factory("project/{itr.normalized_name}", iterate_on="projects"), + ], + ), ] if with_bq_sync: diff --git a/tests/unit/test_routes.py b/tests/unit/test_routes.py --- a/tests/unit/test_routes.py +++ b/tests/unit/test_routes.py @@ -155,6 +155,13 @@ def add_policy(name, filename): traverse="/{username}", domain=warehouse, ), + pretend.call( + "organizations.profile", + "/org/{organization}/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization}", + domain=warehouse, + ), pretend.call("accounts.login", "/account/login/", domain=warehouse), pretend.call("accounts.two-factor", "/account/two-factor/", domain=warehouse), pretend.call(
Organization name should be clickable

1. In any email generated after an organization operation, the organization name in the email should be clickable.
2. When a team or project is viewed, the organization name above the team or project should be clickable.

![project1](https://user-images.githubusercontent.com/37237726/197818834-43b9a40f-bc73-4621-b222-f13aa4de65c4.png)
![team](https://user-images.githubusercontent.com/37237726/197818850-f711e180-92ae-4918-8b49-cd7fb86c1a21.png)

The first image shows an organization and project; the second shows an organization and team. In both instances, the organization name is not clickable.
For the manage views, would clicking that lead to the internal manage page for the organization or to the public profile? For emails, it seems that the public profile would be our only option. We can't do that until we have such a page.

Currently, clicking on a project name leads to the public profile of the project. To be consistent, we would probably want clicking on an organization name (in both manage views and emails) to also lead to the public profile of the organization.
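For reference, with the `organizations.profile` route added in the patch above, a link target for an organization name could be generated like this (a sketch; where and how it is rendered is an assumption):

```python
# Build the public profile URL for an organization. The route name comes
# from the patch above; the surrounding usage context is hypothetical.
profile_url = request.route_path(
    "organizations.profile", organization=organization.name
)
```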
2023-03-08T15:39:34Z
[]
[]
pypi/warehouse
13,164
pypi__warehouse-13164
[ "12425" ]
25f79e52e72969590780e17ad5ff48f0202da130
diff --git a/warehouse/accounts/interfaces.py b/warehouse/accounts/interfaces.py --- a/warehouse/accounts/interfaces.py +++ b/warehouse/accounts/interfaces.py @@ -265,6 +265,11 @@ def loads(token): Gets the data corresponding to the token provided """ + def unsafe_load_payload(token): + """ + Gets the data corresponding to the token provided *regardless of validity* + """ + class IPasswordBreachedService(Interface): failure_message = Attribute("The message to describe the failure that occurred") diff --git a/warehouse/accounts/services.py b/warehouse/accounts/services.py --- a/warehouse/accounts/services.py +++ b/warehouse/accounts/services.py @@ -643,6 +643,20 @@ def loads(self, token, return_timestamp=False): return data + def unsafe_load_payload(self, token): + """ + ¡DANGER! + + This method does not validate expiration whatsoever! + It can *and should* only be used for inspecting an expired token then + doing nothing with it whatsoever. + """ + signature_valid, data = self.serializer.loads_unsafe(token) + + if signature_valid: + return data + return None + def database_login_factory(context, request): return DatabaseUserService( diff --git a/warehouse/events/tags.py b/warehouse/events/tags.py --- a/warehouse/events/tags.py +++ b/warehouse/events/tags.py @@ -76,6 +76,7 @@ class Account(EventTagEnum): OrganizationRoleAdd = "account:organization_role:add" OrganizationRoleChange = "account:organization_role:change" OrganizationRoleDeclineInvite = "account:organization_role:decline_invite" + OrganizationRoleExpireInvite = "account:organization_role:expire_invite" OrganizationRoleInvite = "account:organization_role:invite" OrganizationRoleRemove = "account:organization_role:remove" OrganizationRoleRevokeInvite = "account:organization_role:revoke_invite" @@ -157,6 +158,7 @@ class Organization(EventTagEnum): OrganizationRoleAdd = "organization:organization_role:add" OrganizationRoleChange = "organization:organization_role:change" OrganizationRoleDeclineInvite = "organization:organization_role:decline_invite" + OrganizationRoleExpireInvite = "organization:organization_role:expire_invite" OrganizationRoleInvite = "organization:organization_role:invite" OrganizationRoleRemove = "organization:organization_role:remove" OrganizationRoleRevokeInvite = "organization:organization_role:revoke_invite" diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -1970,6 +1970,122 @@ def add_organization_project(self): return HTTPSeeOther(self.request.path) +def _send_organization_invitation(request, organization, role_name, user): + organization_service = request.find_service(IOrganizationService, context=None) + token_service = request.find_service(ITokenService, name="email") + + existing_role = organization_service.get_organization_role_by_user( + organization.id, user.id + ) + organization_invite = organization_service.get_organization_invite_by_user( + organization.id, user.id + ) + # Cover edge case where invite is invalid but task + # has not updated invite status + try: + invite_token = token_service.loads(organization_invite.token) + except (TokenExpired, AttributeError): + invite_token = None + + if existing_role: + request.session.flash( + request._( + "User '${username}' already has ${role_name} role for organization", + mapping={ + "username": user.username, + "role_name": existing_role.role_name.value, + }, + ), + queue="error", + ) + elif user.primary_email is None or not user.primary_email.verified: + 
request.session.flash( + request._( + "User '${username}' does not have a verified primary email " + "address and cannot be added as a ${role_name} for organization", + mapping={"username": user.username, "role_name": role_name}, + ), + queue="error", + ) + elif ( + organization_invite + and organization_invite.invite_status == OrganizationInvitationStatus.Pending + and invite_token + ): + request.session.flash( + request._( + "User '${username}' already has an active invite. " + "Please try again later.", + mapping={"username": user.username}, + ), + queue="error", + ) + else: + invite_token = token_service.dumps( + { + "action": "email-organization-role-verify", + "desired_role": role_name, + "user_id": user.id, + "organization_id": organization.id, + "submitter_id": request.user.id, + } + ) + if organization_invite: + organization_invite.invite_status = OrganizationInvitationStatus.Pending + organization_invite.token = invite_token + else: + organization_service.add_organization_invite( + organization_id=organization.id, + user_id=user.id, + invite_token=invite_token, + ) + organization.record_event( + tag=EventTag.Organization.OrganizationRoleInvite, + ip_address=request.remote_addr, + additional={ + "submitted_by_user_id": str(request.user.id), + "role_name": role_name, + "target_user_id": str(user.id), + }, + ) + user.record_event( + tag=EventTag.Account.OrganizationRoleInvite, + ip_address=request.remote_addr, + additional={ + "submitted_by_user_id": str(request.user.id), + "organization_name": organization.name, + "role_name": role_name, + }, + ) + owner_users = set(organization_owners(request, organization)) + send_organization_member_invited_email( + request, + owner_users, + user=user, + desired_role=role_name, + initiator_username=request.user.username, + organization_name=organization.name, + email_token=invite_token, + token_age=token_service.max_age, + ) + send_organization_role_verification_email( + request, + user, + desired_role=role_name, + initiator_username=request.user.username, + organization_name=organization.name, + email_token=invite_token, + token_age=token_service.max_age, + ) + request.session.flash( + request._( + "Invitation sent to '${username}'", + mapping={"username": user.username}, + ), + queue="success", + ) + + @view_config( route_name="manage.organization.roles", context=Organization, @@ -1998,119 +2114,8 @@ def manage_organization_roles( role_name = form.role_name.data userid = user_service.find_userid(username) user = user_service.get_user(userid) - token_service = request.find_service(ITokenService, name="email") - - existing_role = organization_service.get_organization_role_by_user( - organization.id, user.id - ) - organization_invite = organization_service.get_organization_invite_by_user( - organization.id, user.id - ) - # Cover edge case where invite is invalid but task - # has not updated invite status - try: - invite_token = token_service.loads(organization_invite.token) - except (TokenExpired, AttributeError): - invite_token = None - if existing_role: - request.session.flash( - request._( - "User '${username}' already has ${role_name} role for organization", - mapping={ - "username": username, - "role_name": existing_role.role_name.value, - }, - ), - queue="error", - ) - elif user.primary_email is None or not user.primary_email.verified: - request.session.flash( - request._( - "User '${username}' does not have a verified primary email " - "address and cannot be added as a ${role_name} for organization", - mapping={"username": username, 
"role_name": role_name.value}, - ), - queue="error", - ) - elif ( - organization_invite - and organization_invite.invite_status - == OrganizationInvitationStatus.Pending - and invite_token - ): - request.session.flash( - request._( - "User '${username}' already has an active invite. " - "Please try again later.", - mapping={"username": username}, - ), - queue="error", - ) - else: - invite_token = token_service.dumps( - { - "action": "email-organization-role-verify", - "desired_role": role_name.value, - "user_id": user.id, - "organization_id": organization.id, - "submitter_id": request.user.id, - } - ) - if organization_invite: - organization_invite.invite_status = OrganizationInvitationStatus.Pending - organization_invite.token = invite_token - else: - organization_service.add_organization_invite( - organization_id=organization.id, - user_id=user.id, - invite_token=invite_token, - ) - organization.record_event( - tag=EventTag.Organization.OrganizationRoleInvite, - ip_address=request.remote_addr, - additional={ - "submitted_by_user_id": str(request.user.id), - "role_name": role_name.value, - "target_user_id": str(userid), - }, - ) - user.record_event( - tag=EventTag.Account.OrganizationRoleInvite, - ip_address=request.remote_addr, - additional={ - "submitted_by_user_id": str(request.user.id), - "organization_name": organization.name, - "role_name": role_name.value, - }, - ) - owner_users = set(organization_owners(request, organization)) - send_organization_member_invited_email( - request, - owner_users, - user=user, - desired_role=role_name.value, - initiator_username=request.user.username, - organization_name=organization.name, - email_token=invite_token, - token_age=token_service.max_age, - ) - send_organization_role_verification_email( - request, - user, - desired_role=role_name.value, - initiator_username=request.user.username, - organization_name=organization.name, - email_token=invite_token, - token_age=token_service.max_age, - ) - request.session.flash( - request._( - "Invitation sent to '${username}'", - mapping={"username": username}, - ), - queue="success", - ) + _send_organization_invitation(request, organization, role_name.value, user) return HTTPSeeOther(request.path) @@ -2131,6 +2136,53 @@ def manage_organization_roles( } +@view_config( + route_name="manage.organization.resend_invite", + context=Organization, + uses_session=True, + require_active_organization=True, + require_methods=["POST"], + permission="manage:organization", + has_translations=True, +) +def resend_organization_invitation(organization, request): + organization_service = request.find_service(IOrganizationService, context=None) + user_service = request.find_service(IUserService, context=None) + token_service = request.find_service(ITokenService, name="email") + user = user_service.get_user(request.POST["user_id"]) + + _next = request.route_path( + "manage.organization.roles", + organization_name=organization.normalized_name, + ) + + organization_invite = organization_service.get_organization_invite_by_user( + organization.id, user.id + ) + if organization_invite is None: + request.session.flash( + request._("Could not find organization invitation."), queue="error" + ) + return HTTPSeeOther(_next) + + # Note: underlying itsdangerous method of "token_service.unsafe_load_payload never + # fails, it just returns None if the payload is not deserializable. Our wrapper + # does at least validate that the signature was valid. 
+ token_data = token_service.unsafe_load_payload(organization_invite.token) + if token_data is None: + request.session.flash( + request._("Organization invitation could not be re-sent."), queue="error" + ) + return HTTPSeeOther(_next) + + role_name = token_data.get("desired_role") + _send_organization_invitation( + request, organization, role_name, organization_invite.user + ) + + return HTTPSeeOther(_next) + + @view_config( route_name="manage.organization.revoke_invite", context=Organization, @@ -2165,7 +2217,13 @@ def revoke_organization_invitation(organization, request): try: token_data = token_service.loads(organization_invite.token) except TokenExpired: - request.session.flash(request._("Invitation already expired."), queue="success") + request.session.flash( + request._( + "Expired invitation for '${username}' deleted.", + mapping={"username": user.username}, + ), + queue="success", + ) return HTTPSeeOther( request.route_path( "manage.organization.roles", diff --git a/warehouse/organizations/tasks.py b/warehouse/organizations/tasks.py --- a/warehouse/organizations/tasks.py +++ b/warehouse/organizations/tasks.py @@ -42,6 +42,20 @@ def update_organization_invitation_status(request): try: token_service.loads(invite.token) except TokenExpired: + invite.user.record_event( + tag=EventTag.Account.OrganizationRoleExpireInvite, + ip_address=request.remote_addr, + additional={ + "organization_name": invite.organization.name, + }, + ) + invite.organization.record_event( + tag=EventTag.Organization.OrganizationRoleExpireInvite, + ip_address=request.remote_addr, + additional={ + "target_user_id": str(invite.user.id), + }, + ) invite.invite_status = OrganizationInvitationStatus.Expired diff --git a/warehouse/routes.py b/warehouse/routes.py --- a/warehouse/routes.py +++ b/warehouse/routes.py @@ -305,6 +305,13 @@ def includeme(config): traverse="/{organization_name}", domain=warehouse, ) + config.add_route( + "manage.organization.resend_invite", + "/manage/organization/{organization_name}/people/resend_invite/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ) config.add_route( "manage.organization.change_role", "/manage/organization/{organization_name}/people/change/",
diff --git a/tests/unit/accounts/test_services.py b/tests/unit/accounts/test_services.py --- a/tests/unit/accounts/test_services.py +++ b/tests/unit/accounts/test_services.py @@ -1014,6 +1014,32 @@ def test_loads_token_is_invalid(self, token_service): with pytest.raises(TokenInvalid): token_service.loads("invalid") + def test_unsafe_load_payload(self, token_service): + sign_time = pytz.UTC.localize( + datetime.datetime.utcnow() - datetime.timedelta(days=1) + ) + with freezegun.freeze_time(sign_time): + token = token_service.dumps({"foo": "bar"}) + + with pytest.raises(TokenExpired): + token_service.loads(token) + + assert token_service.unsafe_load_payload(token) == {"foo": "bar"} + + def test_unsafe_load_payload_signature_invalid(self, token_service): + sign_time = pytz.UTC.localize( + datetime.datetime.utcnow() - datetime.timedelta(minutes=10) + ) + with freezegun.freeze_time(sign_time): + token = services.TokenService("wrongsecret", "pepper", max_age=3600).dumps( + {"foo": "bar"} + ) + + with pytest.raises(TokenInvalid): + token_service.loads(token) + + assert token_service.unsafe_load_payload(token) is None + def test_database_login_factory(monkeypatch, pyramid_services, metrics, remote_addr): service_obj = pretend.stub() diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -4444,6 +4444,217 @@ def test_reinvite_organization_role_after_expiration( ] +class TestResendOrganizationInvitations: + def test_resend_invitation( + self, db_request, token_service, enable_organizations, monkeypatch + ): + organization = OrganizationFactory.create(name="foobar") + user = UserFactory.create(username="testuser") + EmailFactory.create(user=user, verified=True, primary=True) + OrganizationInvitationFactory.create( + organization=organization, + user=user, + invite_status=OrganizationInvitationStatus.Expired, + ) + owner_user = UserFactory.create() + OrganizationRoleFactory( + user=owner_user, + organization=organization, + role_name=OrganizationRoleType.Owner, + ) + + send_organization_member_invited_email = pretend.call_recorder( + lambda r, u, **k: None + ) + monkeypatch.setattr( + views, + "send_organization_member_invited_email", + send_organization_member_invited_email, + ) + send_organization_role_verification_email = pretend.call_recorder( + lambda r, u, **k: None + ) + monkeypatch.setattr( + views, + "send_organization_role_verification_email", + send_organization_role_verification_email, + ) + + db_request.method = "POST" + db_request.POST = MultiDict({"user_id": user.id}) + db_request.remote_addr = "10.10.10.10" + db_request.user = owner_user + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "/manage/organizations" + ) + db_request.session = pretend.stub( + flash=pretend.call_recorder(lambda *a, **kw: None) + ) + token_service.loads = pretend.raiser(TokenExpired) + token_service.unsafe_load_payload = pretend.call_recorder( + lambda data: { + "action": "email-organization-role-verify", + "desired_role": "Manager", + "user_id": user.id, + "organization_id": organization.id, + "submitter_id": owner_user.id, + } + ) + + result = views.resend_organization_invitation(organization, db_request) + db_request.db.flush() + + assert ( + db_request.db.query(OrganizationInvitation) + .filter(OrganizationInvitation.user == user) + .filter(OrganizationInvitation.organization == organization) + .filter( + OrganizationInvitation.invite_status + == OrganizationInvitationStatus.Pending + ) + 
.one() + ) + assert db_request.session.flash.calls == [ + pretend.call(f"Invitation sent to '{user.username}'", queue="success") + ] + + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/manage/organizations" + + assert send_organization_member_invited_email.calls == [ + pretend.call( + db_request, + {owner_user}, + user=user, + desired_role="Manager", + initiator_username=db_request.user.username, + organization_name=organization.name, + email_token=token_service.dumps( + { + "action": "email-organization-role-verify", + "desired_role": "Manager", + "user_id": user.id, + "organization_id": organization.id, + "submitter_id": db_request.user.id, + } + ), + token_age=token_service.max_age, + ) + ] + assert send_organization_role_verification_email.calls == [ + pretend.call( + db_request, + user, + desired_role="Manager", + initiator_username=db_request.user.username, + organization_name=organization.name, + email_token=token_service.dumps( + { + "action": "email-organization-role-verify", + "desired_role": "Manager", + "user_id": user.id, + "organization_id": organization.id, + "submitter_id": db_request.user.id, + } + ), + token_age=token_service.max_age, + ) + ] + + def test_resend_invitation_fails_corrupt_token( + self, db_request, token_service, enable_organizations, monkeypatch + ): + organization = OrganizationFactory.create(name="foobar") + user = UserFactory.create(username="testuser") + OrganizationInvitationFactory.create( + organization=organization, + user=user, + invite_status=OrganizationInvitationStatus.Expired, + ) + owner_user = UserFactory.create() + OrganizationRoleFactory( + user=owner_user, + organization=organization, + role_name=OrganizationRoleType.Owner, + ) + + db_request.method = "POST" + db_request.POST = MultiDict({"user_id": user.id}) + db_request.remote_addr = "10.10.10.10" + db_request.user = owner_user + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "/manage/organizations" + ) + db_request.session = pretend.stub( + flash=pretend.call_recorder(lambda *a, **kw: None) + ) + token_service.loads = pretend.raiser(TokenExpired) + token_service.unsafe_load_payload = pretend.call_recorder(lambda data: None) + + result = views.resend_organization_invitation(organization, db_request) + db_request.db.flush() + + assert ( + db_request.db.query(OrganizationInvitation) + .filter(OrganizationInvitation.user == user) + .filter(OrganizationInvitation.organization == organization) + .filter( + OrganizationInvitation.invite_status + == OrganizationInvitationStatus.Pending + ) + .one_or_none() + ) is None + assert db_request.session.flash.calls == [ + pretend.call("Organization invitation could not be re-sent.", queue="error") + ] + + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/manage/organizations" + + def test_resend_invitation_fails_missing_invitation( + self, db_request, token_service, enable_organizations, monkeypatch + ): + organization = OrganizationFactory.create(name="foobar") + user = UserFactory.create(username="testuser") + owner_user = UserFactory.create() + OrganizationRoleFactory( + user=owner_user, + organization=organization, + role_name=OrganizationRoleType.Owner, + ) + + db_request.method = "POST" + db_request.POST = MultiDict({"user_id": user.id}) + db_request.remote_addr = "10.10.10.10" + db_request.user = owner_user + db_request.route_path = pretend.call_recorder( + lambda *a, **kw: "/manage/organizations" + ) + db_request.session = pretend.stub( + 
flash=pretend.call_recorder(lambda *a, **kw: None) + ) + + result = views.resend_organization_invitation(organization, db_request) + db_request.db.flush() + + assert ( + db_request.db.query(OrganizationInvitation) + .filter(OrganizationInvitation.user == user) + .filter(OrganizationInvitation.organization == organization) + .filter( + OrganizationInvitation.invite_status + == OrganizationInvitationStatus.Pending + ) + .one_or_none() + ) is None + assert db_request.session.flash.calls == [ + pretend.call("Could not find organization invitation.", queue="error") + ] + + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/manage/organizations" + + class TestRevokeOrganizationInvitation: def test_revoke_invitation( self, db_request, token_service, enable_organizations, monkeypatch @@ -4597,7 +4808,7 @@ def test_token_expired(self, db_request, token_service, enable_organizations): .one_or_none() ) assert db_request.session.flash.calls == [ - pretend.call("Invitation already expired.", queue="success") + pretend.call("Expired invitation for 'testuser' deleted.", queue="success") ] assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/manage/organizations/roles" diff --git a/tests/unit/organizations/test_tasks.py b/tests/unit/organizations/test_tasks.py --- a/tests/unit/organizations/test_tasks.py +++ b/tests/unit/organizations/test_tasks.py @@ -15,6 +15,7 @@ import pretend from warehouse.accounts.interfaces import ITokenService, TokenExpired +from warehouse.events.tags import EventTag from warehouse.organizations.models import ( Organization, OrganizationInvitationStatus, @@ -45,9 +46,14 @@ class TestUpdateInvitationStatus: - def test_update_invitation_status(self, db_request): + def test_update_invitation_status( + self, db_request, user_service, organization_service + ): organization = OrganizationFactory.create() + organization.record_event = pretend.call_recorder(lambda *a, **kw: None) user = UserFactory.create() + user.record_event = pretend.call_recorder(lambda *a, **kw: None) + invite = OrganizationInvitationFactory(user=user, organization=organization) token_service = pretend.stub(loads=pretend.raiser(TokenExpired)) @@ -60,9 +66,27 @@ def test_update_invitation_status(self, db_request): ] assert invite.invite_status == OrganizationInvitationStatus.Expired - def test_no_updates(self, db_request): + assert user.record_event.calls == [ + pretend.call( + tag=EventTag.Account.OrganizationRoleExpireInvite, + ip_address="1.2.3.4", + additional={"organization_name": invite.organization.name}, + ) + ] + assert organization.record_event.calls == [ + pretend.call( + tag=EventTag.Organization.OrganizationRoleExpireInvite, + ip_address="1.2.3.4", + additional={"target_user_id": str(invite.user.id)}, + ) + ] + + def test_no_updates(self, db_request, user_service, organization_service): organization = OrganizationFactory.create() + organization.record_event = pretend.call_recorder(lambda *a, **kw: None) user = UserFactory.create() + user.record_event = pretend.call_recorder(lambda *a, **kw: None) + invite = OrganizationInvitationFactory(user=user, organization=organization) token_service = pretend.stub(loads=lambda token: {}) @@ -75,6 +99,9 @@ def test_no_updates(self, db_request): ] assert invite.invite_status == OrganizationInvitationStatus.Pending + assert user.record_event.calls == [] + assert organization.record_event.calls == [] + class TestDeleteOrganizations: def test_delete_declined_organizations(self, db_request): diff --git 
a/tests/unit/test_routes.py b/tests/unit/test_routes.py --- a/tests/unit/test_routes.py +++ b/tests/unit/test_routes.py @@ -312,6 +312,13 @@ def add_policy(name, filename): traverse="/{organization_name}", domain=warehouse, ), + pretend.call( + "manage.organization.resend_invite", + "/manage/organization/{organization_name}/people/resend_invite/", + factory="warehouse.organizations.models:OrganizationFactory", + traverse="/{organization_name}", + domain=warehouse, + ), pretend.call( "manage.organization.change_role", "/manage/organization/{organization_name}/people/change/",
User invitation expiry

The user invite to join an organization or to collaborate on a project has a time limit of 6 hours. Currently, the invite expires before 6 hours. To recreate the error:

- Click on Your organizations
- Click on Manage for a specific organization
- Click on Teams
- Click on Manage for a specific team
- Click on Members
- Scroll down to the bottom of the page and select a user in the drop down list
- Click on Add

The user invitation expires before 6 hours. If the invite has expired, it should not be possible to revoke the invite.
On the first part, this requires a little investigation to reproduce. The second part seems to stem from the lack of an equivalent to the task we use to update RoleInvitation statuses: https://github.com/pypi/warehouse/blob/39daea188d2e1e6494c50753cd48cb5a8da3e8c4/warehouse/manage/tasks.py#L18-L31
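For context, the core of such a status-update task, condensed from the diff context in the patch above (`warehouse/organizations/tasks.py`); the periodic-task plumbing is omitted and the exact query shape is an assumption:

```python
# Condensed sketch of the periodic invitation-status task; mirrors the
# structure visible in the patch above, not a verbatim copy.
from warehouse.accounts.interfaces import ITokenService, TokenExpired
from warehouse.organizations.models import (
    OrganizationInvitation,
    OrganizationInvitationStatus,
)


def update_organization_invitation_status(request):
    token_service = request.find_service(ITokenService, name="email")
    invites = (
        request.db.query(OrganizationInvitation)
        .filter(
            OrganizationInvitation.invite_status
            == OrganizationInvitationStatus.Pending
        )
        .all()
    )
    for invite in invites:
        try:
            # loads() raises TokenExpired once the signed token ages out.
            token_service.loads(invite.token)
        except TokenExpired:
            invite.invite_status = OrganizationInvitationStatus.Expired
```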
2023-03-08T20:45:13Z
[]
[]
pypi/warehouse
13,175
pypi__warehouse-13175
[ "12420" ]
9fbc03ada55b0bb492bbed184f5a274a0655417b
diff --git a/warehouse/manage/views.py b/warehouse/manage/views.py --- a/warehouse/manage/views.py +++ b/warehouse/manage/views.py @@ -3608,7 +3608,11 @@ def delete_project(project, request): submitter_role = get_user_role_in_project(project, request.user, request) - for contributor in project.users: + contributors = project.users + if project.organization: + contributors += project.organization.owners + + for contributor in contributors: contributor_role = get_user_role_in_project(project, contributor, request) send_removed_project_email(
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -7004,6 +7004,84 @@ def test_delete_project(self, monkeypatch, db_request): ] assert not (db_request.db.query(Project).filter(Project.name == "foo").count()) + def test_delete_project_sends_emails_to_owners(self, monkeypatch, db_request): + organization = OrganizationFactory.create(name="baz") + project = ProjectFactory.create(name="foo") + OrganizationProjectFactory.create(organization=organization, project=project) + + db_request.user = UserFactory.create() + OrganizationRoleFactory.create( + organization=organization, + user=db_request.user, + role_name=OrganizationRoleType.Owner, + ) + + # Add a second Owner + owner2 = UserFactory.create() + OrganizationRoleFactory.create( + organization=organization, + user=owner2, + role_name=OrganizationRoleType.Owner, + ) + # Add a Manager, who won't receive the email + manager = UserFactory.create() + OrganizationRoleFactory.create( + organization=organization, + user=manager, + role_name=OrganizationRoleType.Manager, + ) + + db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect") + db_request.session = pretend.stub( + flash=pretend.call_recorder(lambda *a, **kw: None) + ) + db_request.POST["confirm_project_name"] = project.name + + get_user_role_in_project = pretend.call_recorder( + lambda project, user, req: "Owner" + ) + monkeypatch.setattr(views, "get_user_role_in_project", get_user_role_in_project) + + send_removed_project_email = pretend.call_recorder(lambda req, user, **k: None) + monkeypatch.setattr( + views, "send_removed_project_email", send_removed_project_email + ) + + result = views.delete_project(project, db_request) + + assert db_request.session.flash.calls == [ + pretend.call("Deleted the project 'foo'", queue="success") + ] + assert db_request.route_path.calls == [pretend.call("manage.projects")] + assert isinstance(result, HTTPSeeOther) + assert result.headers["Location"] == "/the-redirect" + + assert get_user_role_in_project.calls == [ + pretend.call(project, db_request.user, db_request), + pretend.call(project, db_request.user, db_request), + pretend.call(project, owner2, db_request), + ] + + assert send_removed_project_email.calls == [ + pretend.call( + db_request, + db_request.user, + project_name=project.name, + submitter_name=db_request.user.username, + submitter_role="Owner", + recipient_role="Owner", + ), + pretend.call( + db_request, + owner2, + project_name=project.name, + submitter_name=db_request.user.username, + submitter_role="Owner", + recipient_role="Owner", + ), + ] + assert not (db_request.db.query(Project).filter(Project.name == "foo").count()) + class TestManageProjectDocumentation: def test_manage_project_documentation(self):
Missing email in organization account operations

For the following operations, no email is generated:

Revoking collaborator invitation:

- Click on Your organizations
- Click on Manage for a specific organization
- Click on Projects
- Click on Manage for a specific project
- Click on Collaborators
- Click on Revoke invite and then confirm the operation

Deleting a project:

- Click on Your organizations
- Click on Manage for a specific organization
- Click on Projects
- Click on Manage for a specific project
- Click on Settings
- Enter the project name
- Click on Delete project

When deleting the project, the email to the owners should mention who deleted it. This information is not required when revoking an external collaborator invitation.
Priority: medium

For revoking, we generate an email with "An invitation for you to join the "sample1" organization has been canceled." Refs: `warehouse.email.send_canceled_as_invited_organization_member_email()`; no further action needed.

Org project deletion needs a minor change; will add a PR.
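The merged patch above handles this by expanding the recipient list in `delete_project`; restated as a sketch (the `list(...)` copy is my addition to avoid mutating the relationship in place, and is not part of the patch):

```python
# Notify project collaborators plus, for organization-owned projects, the
# organization owners; submitter_name tells recipients who deleted it.
submitter_role = get_user_role_in_project(project, request.user, request)

contributors = list(project.users)  # copy so the relationship isn't mutated
if project.organization:
    contributors += project.organization.owners

for contributor in contributors:
    contributor_role = get_user_role_in_project(project, contributor, request)
    send_removed_project_email(
        request,
        contributor,
        project_name=project.name,
        submitter_name=request.user.username,
        submitter_role=submitter_role,
        recipient_role=contributor_role,
    )
```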
2023-03-09T15:24:37Z
[]
[]
pypi/warehouse
13,272
pypi__warehouse-13272
[ "13270" ]
16d91ab1788633b52264e60e5254d18a1be30ecb
diff --git a/warehouse/accounts/views.py b/warehouse/accounts/views.py --- a/warehouse/accounts/views.py +++ b/warehouse/accounts/views.py @@ -1468,6 +1468,7 @@ def add_pending_github_oidc_publisher(self): repository_name=form.repository.data, repository_owner=form.normalized_owner, workflow_filename=form.workflow_filename.data, + environment=form.normalized_environment, ) .first() is not None @@ -1490,6 +1491,7 @@ def add_pending_github_oidc_publisher(self): repository_owner=form.normalized_owner, repository_owner_id=form.owner_id, workflow_filename=form.workflow_filename.data, + environment=form.normalized_environment, ) self.request.db.add(pending_publisher) diff --git a/warehouse/manage/views/__init__.py b/warehouse/manage/views/__init__.py --- a/warehouse/manage/views/__init__.py +++ b/warehouse/manage/views/__init__.py @@ -1263,6 +1263,7 @@ def add_github_oidc_publisher(self): GitHubPublisher.repository_name == form.repository.data, GitHubPublisher.repository_owner == form.normalized_owner, GitHubPublisher.workflow_filename == form.workflow_filename.data, + GitHubPublisher.environment == form.normalized_environment, ) .one_or_none() ) @@ -1272,6 +1273,7 @@ def add_github_oidc_publisher(self): repository_owner=form.normalized_owner, repository_owner_id=form.owner_id, workflow_filename=form.workflow_filename.data, + environment=form.normalized_environment, ) self.request.db.add(publisher) diff --git a/warehouse/migrations/versions/689dea7d202a_add_github_oidc_publisher_environment_.py b/warehouse/migrations/versions/689dea7d202a_add_github_oidc_publisher_environment_.py new file mode 100644 --- /dev/null +++ b/warehouse/migrations/versions/689dea7d202a_add_github_oidc_publisher_environment_.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +add GitHub OIDC publisher environment constraint + +Revision ID: 689dea7d202a +Revises: d142f435bb39 +Create Date: 2023-04-11 17:57:08.941312 +""" + +import sqlalchemy as sa + +from alembic import op + +revision = "689dea7d202a" +down_revision = "d142f435bb39" + + +def upgrade(): + op.add_column( + "github_oidc_publishers", sa.Column("environment", sa.String(), nullable=True) + ) + op.add_column( + "pending_github_oidc_publishers", + sa.Column("environment", sa.String(), nullable=True), + ) + + +def downgrade(): + op.drop_column("pending_github_oidc_publishers", "environment") + op.drop_column("github_oidc_publishers", "environment") diff --git a/warehouse/oidc/forms.py b/warehouse/oidc/forms.py --- a/warehouse/oidc/forms.py +++ b/warehouse/oidc/forms.py @@ -25,7 +25,7 @@ class GitHubPublisherBase(forms.Form): - __params__ = ["owner", "repository", "workflow_filename"] + __params__ = ["owner", "repository", "workflow_filename", "environment"] owner = wtforms.StringField( validators=[ @@ -50,6 +50,11 @@ class GitHubPublisherBase(forms.Form): ] ) + # Environment names are not case sensitive. An environment name may not + # exceed 255 characters and must be unique within the repository. 
+ # https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment + environment = wtforms.StringField() + def __init__(self, *args, api_token, **kwargs): super().__init__(*args, **kwargs) self._api_token = api_token @@ -148,6 +153,12 @@ def validate_workflow_filename(self, field): _("Workflow filename must be a filename only, without directories") ) + @property + def normalized_environment(self): + return ( + self.environment.data.lower() if self.environment.data is not None else None + ) + class PendingGitHubPublisherForm(GitHubPublisherBase): __params__ = GitHubPublisherBase.__params__ + ["project_name"] diff --git a/warehouse/oidc/models.py b/warehouse/oidc/models.py --- a/warehouse/oidc/models.py +++ b/warehouse/oidc/models.py @@ -57,6 +57,28 @@ def _check_job_workflow_ref(ground_truth, signed_claim, all_signed_claims): return f"{ground_truth}@{ref}" == signed_claim +def _check_environment(ground_truth, signed_claim, all_signed_claims): + # When there is an environment, we expect a case-insensitive string. + # https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment + # For tokens that are generated outside of an environment, the claim will + # be missing. + + # If we haven't set an environment name for the publisher, we don't need to + # check this claim + if ground_truth is None: + return True + + # Defensive: GitHub might give us an empty environment if this token wasn't + # generated from within an environment, in which case the check should + # fail. + if not signed_claim: + return False + + # We store the normalized environment name, but we normalize both here to + # ensure we can't accidentally become case-sensitive. + return ground_truth.lower() == signed_claim.lower() + + def _check_sub(ground_truth, signed_claim, _all_signed_claims): # We expect a string formatted as follows: # repo:ORG/REPO[:OPTIONAL-STUFF] @@ -238,6 +260,7 @@ class GitHubPublisherMixin: repository_owner = Column(String, nullable=False) repository_owner_id = Column(String, nullable=False) workflow_filename = Column(String, nullable=False) + environment = Column(String, nullable=True) __verifiable_claims__ = { "sub": _check_sub, @@ -245,6 +268,7 @@ class GitHubPublisherMixin: "repository_owner": _check_claim_binary(str.__eq__), "repository_owner_id": _check_claim_binary(str.__eq__), "job_workflow_ref": _check_job_workflow_ref, + "environment": _check_environment, } __unchecked_claims__ = { @@ -343,6 +367,7 @@ def reify(self, session): GitHubPublisher.repository_name == self.repository_name, GitHubPublisher.repository_owner == self.repository_owner, GitHubPublisher.workflow_filename == self.workflow_filename, + GitHubPublisher.environment == self.environment, ) .one_or_none() ) @@ -352,6 +377,7 @@ def reify(self, session): repository_owner=self.repository_owner, repository_owner_id=self.repository_owner_id, workflow_filename=self.workflow_filename, + environment=self.environment, ) session.delete(self)
diff --git a/tests/common/db/oidc.py b/tests/common/db/oidc.py --- a/tests/common/db/oidc.py +++ b/tests/common/db/oidc.py @@ -26,6 +26,7 @@ class Meta: repository_owner = factory.Faker("pystr", max_chars=12) repository_owner_id = factory.Faker("pystr", max_chars=12) workflow_filename = "example.yml" + environment = "production" class PendingGitHubPublisherFactory(WarehouseFactory): @@ -38,3 +39,4 @@ class Meta: repository_owner = factory.Faker("pystr", max_chars=12) repository_owner_id = factory.Faker("pystr", max_chars=12) workflow_filename = "example.yml" + environment = "production" diff --git a/tests/unit/accounts/test_views.py b/tests/unit/accounts/test_views.py --- a/tests/unit/accounts/test_views.py +++ b/tests/unit/accounts/test_views.py @@ -3493,6 +3493,7 @@ def test_add_pending_github_oidc_publisher_already_exists(self, monkeypatch): repository=pretend.stub(data="some-repo"), normalized_owner="some-owner", workflow_filename=pretend.stub(data="some-workflow.yml"), + normalized_environment="some-environment", ) pending_github_publisher_form_cls = pretend.call_recorder( lambda *a, **kw: pending_github_publisher_form_obj @@ -3575,6 +3576,7 @@ def test_add_pending_github_oidc_publisher(self, monkeypatch): publisher_name="some-publisher", id=uuid.uuid4(), publisher_url="some-url", + environment="some-environment", ) # NOTE: Can't set __str__ using pretend.stub() monkeypatch.setattr( @@ -3591,6 +3593,7 @@ def test_add_pending_github_oidc_publisher(self, monkeypatch): normalized_owner="some-owner", owner_id="some-owner-id", workflow_filename=pretend.stub(data="some-workflow.yml"), + normalized_environment=pending_publisher.environment, ) pending_github_publisher_form_cls = pretend.call_recorder( lambda *a, **kw: pending_github_publisher_form_obj @@ -3635,6 +3638,7 @@ def test_add_pending_github_oidc_publisher(self, monkeypatch): repository_owner="some-owner", repository_owner_id="some-owner-id", workflow_filename="some-workflow.yml", + environment="some-environment", ) ] assert request.db.add.calls == [pretend.call(pending_publisher)] diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -6099,6 +6099,7 @@ def test_add_github_oidc_publisher_preexisting(self, monkeypatch): owner_id="1234", workflow_filename="fakeworkflow.yml", publisher_url="some-url", + environment="some-environment", ) # NOTE: Can't set __str__ using pretend.stub() monkeypatch.setattr(publisher.__class__, "__str__", lambda s: "fakespecifier") @@ -6142,6 +6143,7 @@ def test_add_github_oidc_publisher_preexisting(self, monkeypatch): repository=pretend.stub(data=publisher.repository_name), normalized_owner=publisher.owner, workflow_filename=pretend.stub(data=publisher.workflow_filename), + normalized_environment=publisher.environment, ) github_publisher_form_cls = pretend.call_recorder( lambda *a, **kw: github_publisher_form_obj @@ -6230,6 +6232,7 @@ def test_add_github_oidc_publisher_created(self, monkeypatch): normalized_owner="fakeowner", owner_id="1234", workflow_filename=pretend.stub(data="fakeworkflow.yml"), + normalized_environment="some-environment", ) github_publisher_form_cls = pretend.call_recorder( lambda *a, **kw: github_publisher_form_obj @@ -6303,6 +6306,7 @@ def test_add_github_oidc_publisher_already_registered_with_project( owner="fakeowner", owner_id="1234", workflow_filename="fakeworkflow.yml", + environment="some-environment", ) # NOTE: Can't set __str__ using pretend.stub() 
monkeypatch.setattr(publisher.__class__, "__str__", lambda s: "fakespecifier") @@ -6341,6 +6345,7 @@ def test_add_github_oidc_publisher_already_registered_with_project( repository=pretend.stub(data=publisher.repository_name), normalized_owner=publisher.owner, workflow_filename=pretend.stub(data=publisher.workflow_filename), + normalized_environment=publisher.environment, ) github_publisher_form_cls = pretend.call_recorder( lambda *a, **kw: github_publisher_form_obj diff --git a/tests/unit/oidc/test_forms.py b/tests/unit/oidc/test_forms.py --- a/tests/unit/oidc/test_forms.py +++ b/tests/unit/oidc/test_forms.py @@ -309,3 +309,16 @@ def test_validate_workflow_filename(self, workflow_filename): with pytest.raises(wtforms.validators.ValidationError): form.validate_workflow_filename(field) + + @pytest.mark.parametrize( + "data, expected", + [ + ("wu-tang", "wu-tang"), + ("WU-TANG", "wu-tang"), + ("", ""), + (None, None), + ], + ) + def test_normalized_environment(self, data, expected): + form = forms.GitHubPublisherForm(api_token=pretend.stub(), environment=data) + assert form.normalized_environment == expected diff --git a/tests/unit/oidc/test_models.py b/tests/unit/oidc/test_models.py --- a/tests/unit/oidc/test_models.py +++ b/tests/unit/oidc/test_models.py @@ -45,6 +45,7 @@ def test_github_publisher_all_known_claims(self): "repository_owner", "repository_owner_id", "job_workflow_ref", + "environment", # preverified claims "iss", "iat", @@ -80,6 +81,7 @@ def test_github_publisher_computed_properties(self): repository_owner="fakeowner", repository_owner_id="fakeid", workflow_filename="fakeworkflow.yml", + environment="fakeenv", ) for claim_name in publisher.__verifiable_claims__.keys(): @@ -140,12 +142,14 @@ def test_github_publisher_missing_claims(self, monkeypatch): pretend.call("JWT for GitHubPublisher is missing claim: sub") ] - def test_github_publisher_verifies(self, monkeypatch): + @pytest.mark.parametrize("environment", [None, "some-environment"]) + def test_github_publisher_verifies(self, monkeypatch, environment): publisher = models.GitHubPublisher( repository_name="fakerepo", repository_owner="fakeowner", repository_owner_id="fakeid", workflow_filename="fakeworkflow.yml", + environment=environment, ) noop_check = pretend.call_recorder(lambda gt, sc, ac: True) @@ -234,6 +238,21 @@ def test_github_publisher_sub_claim(self, truth, claim, valid): check = models.GitHubPublisher.__verifiable_claims__["sub"] assert check(truth, claim, pretend.stub()) is valid + @pytest.mark.parametrize( + ("truth", "claim", "valid"), + [ + (None, None, True), + (None, "some-environment", True), + ("some-environment", "some-environment", True), + ("some-environment", "sOmE-eNvIrOnMeNt", True), + ("some-environment", None, False), + ("some-environment", "some-other-environment", False), + ], + ) + def test_github_publisher_environment_claim(self, truth, claim, valid): + check = models.GitHubPublisher.__verifiable_claims__["environment"] + assert check(truth, claim, pretend.stub()) is valid + class TestPendingGitHubPublisher: def test_reify_does_not_exist_yet(self, db_request): @@ -245,6 +264,7 @@ def test_reify_does_not_exist_yet(self, db_request): repository_owner=pending_publisher.repository_owner, repository_owner_id=pending_publisher.repository_owner_id, workflow_filename=pending_publisher.workflow_filename, + environment=pending_publisher.environment, ) .one_or_none() is None @@ -259,6 +279,7 @@ def test_reify_does_not_exist_yet(self, db_request): assert publisher.repository_owner == 
pending_publisher.repository_owner assert publisher.repository_owner_id == pending_publisher.repository_owner_id assert publisher.workflow_filename == pending_publisher.workflow_filename + assert publisher.environment == pending_publisher.environment def test_reify_already_exists(self, db_request): existing_publisher = GitHubPublisherFactory.create() @@ -267,6 +288,7 @@ def test_reify_already_exists(self, db_request): repository_owner=existing_publisher.repository_owner, repository_owner_id=existing_publisher.repository_owner_id, workflow_filename=existing_publisher.workflow_filename, + environment=existing_publisher.environment, ) publisher = pending_publisher.reify(db_request.db)
[OIDC] Introduce additional (optional) trust constraints

Summarized from a private discussion on the OIDC beta.

At the moment, the OIDC trust relationship between GitHub and PyPI is defined by three variables: the GitHub org name, the repo name, and the workflow within the repository. If PyPI receives an OIDC token that satisfies these three variables, it considers that token valid for exchange/minting purposes.

This is sufficient for many security models, but may not be sufficient for projects with maintainers who can commit and/or make branches but who should **not** be able to make releases to PyPI. An example "malicious maintainer" scenario:

* `foo/bar @ release.yml` is the trusted OIDC configuration, with `release.yml` configured to trigger on `release: published` events;
* Mallory has commit and branch access to `foo/bar`, but not release or environment modification access;
* Mallory creates a `malicious` branch on `foo/bar` and modifies `release.yml` to accept the `workflow_dispatch` trigger;
* Mallory manually dispatches `release.yml` using the GitHub API or Web UI, resulting in her ability to publish to PyPI without ordinary release permissions.

Critically, this can happen even when `release.yml` constrains the publishing job to a specific environment (e.g. `environment: release`), since Mallory can simply strip the environment off of the workflow before triggering it.

To prevent this, we need to introduce additional trust constraints: if the PyPI OIDC configuration includes the GitHub Actions environment name, Mallory won't be able to strip it off. From there, the environment can enable "manual approval," meaning that Mallory won't be able to trigger it unless she's explicitly trusted as a workflow runner by whoever configured OIDC publishing (i.e., a project owner on PyPI).

CC @di @steiza
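The merged patch above addresses this with an optional, case-insensitive `environment` claim check; its decision logic condenses to roughly the following (a restatement of `_check_environment` from the patch, not new behavior):

```python
# Condensed restatement of the environment claim check from the patch above.
def _check_environment(ground_truth, signed_claim, all_signed_claims):
    if ground_truth is None:
        # No environment configured on the publisher: nothing to enforce.
        return True
    if not signed_claim:
        # Publisher requires an environment, but the token carries none:
        # the check must fail.
        return False
    # GitHub environment names are case-insensitive; compare accordingly.
    return ground_truth.lower() == signed_claim.lower()
```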
2023-03-22T21:18:46Z
[]
[]
pypi/warehouse
13,368
pypi__warehouse-13368
[ "13364" ]
3c0599e5038570afaa6ad3ff385410af3e96f8cc
diff --git a/warehouse/email/__init__.py b/warehouse/email/__init__.py --- a/warehouse/email/__init__.py +++ b/warehouse/email/__init__.py @@ -977,8 +977,8 @@ def send_recovery_code_reminder_email(request, user): return {"username": user.username} -@_email("oidc-publisher-added") -def send_oidc_publisher_added_email(request, user, project_name, publisher): +@_email("trusted-publisher-added") +def send_trusted_publisher_added_email(request, user, project_name, publisher): # We use the request's user, since they're the one triggering the action. return { "username": request.user.username, @@ -988,8 +988,8 @@ def send_oidc_publisher_added_email(request, user, project_name, publisher): } -@_email("oidc-publisher-removed") -def send_oidc_publisher_removed_email(request, user, project_name, publisher): +@_email("trusted-publisher-removed") +def send_trusted_publisher_removed_email(request, user, project_name, publisher): # We use the request's user, since they're the one triggering the action. return { "username": request.user.username, @@ -999,8 +999,8 @@ def send_oidc_publisher_removed_email(request, user, project_name, publisher): } -@_email("pending-oidc-publisher-invalidated") -def send_pending_oidc_publisher_invalidated_email(request, user, project_name): +@_email("pending-trusted-publisher-invalidated") +def send_pending_trusted_publisher_invalidated_email(request, user, project_name): return { "project_name": project_name, } diff --git a/warehouse/manage/views/__init__.py b/warehouse/manage/views/__init__.py --- a/warehouse/manage/views/__init__.py +++ b/warehouse/manage/views/__init__.py @@ -49,8 +49,6 @@ send_collaborator_removed_email, send_collaborator_role_changed_email, send_email_verification_email, - send_oidc_publisher_added_email, - send_oidc_publisher_removed_email, send_password_change_email, send_primary_email_change_email, send_project_role_verification_email, @@ -61,6 +59,8 @@ send_removed_project_release_file_email, send_role_changed_as_collaborator_email, send_team_collaborator_added_email, + send_trusted_publisher_added_email, + send_trusted_publisher_removed_email, send_two_factor_added_email, send_two_factor_removed_email, send_unyanked_project_release_email, @@ -1286,7 +1286,7 @@ def add_github_oidc_publisher(self): return response for user in self.project.users: - send_oidc_publisher_added_email( + send_trusted_publisher_added_email( self.request, user, project_name=self.project.name, @@ -1355,7 +1355,7 @@ def delete_oidc_publisher(self): return self.default_response for user in self.project.users: - send_oidc_publisher_removed_email( + send_trusted_publisher_removed_email( self.request, user, project_name=self.project.name, diff --git a/warehouse/oidc/views.py b/warehouse/oidc/views.py --- a/warehouse/oidc/views.py +++ b/warehouse/oidc/views.py @@ -20,7 +20,7 @@ from sqlalchemy import func from warehouse.admin.flags import AdminFlagValue -from warehouse.email import send_pending_oidc_publisher_invalidated_email +from warehouse.email import send_pending_trusted_publisher_invalidated_email from warehouse.events.tags import EventTag from warehouse.macaroons import caveats from warehouse.macaroons.interfaces import IMacaroonService @@ -156,7 +156,7 @@ def _invalid(errors): .all() ) for stale_publisher in stale_pending_publishers: - send_pending_oidc_publisher_invalidated_email( + send_pending_trusted_publisher_invalidated_email( request, stale_publisher.added_by, project_name=stale_publisher.project_name,
diff --git a/tests/unit/email/test_init.py b/tests/unit/email/test_init.py --- a/tests/unit/email/test_init.py +++ b/tests/unit/email/test_init.py @@ -5511,17 +5511,17 @@ def test_recovery_code_emails( ] -class TestOIDCPublisherEmails: +class TestTrustedPublisherEmails: @pytest.mark.parametrize( "fn, template_name", [ ( - email.send_pending_oidc_publisher_invalidated_email, - "pending-oidc-publisher-invalidated", + email.send_pending_trusted_publisher_invalidated_email, + "pending-trusted-publisher-invalidated", ), ], ) - def test_pending_oidc_publisher_emails( + def test_pending_trusted_publisher_emails( self, pyramid_request, pyramid_config, monkeypatch, fn, template_name ): stub_user = pretend.stub( @@ -5601,11 +5601,11 @@ def test_pending_oidc_publisher_emails( @pytest.mark.parametrize( "fn, template_name", [ - (email.send_oidc_publisher_added_email, "oidc-publisher-added"), - (email.send_oidc_publisher_removed_email, "oidc-publisher-removed"), + (email.send_trusted_publisher_added_email, "trusted-publisher-added"), + (email.send_trusted_publisher_removed_email, "trusted-publisher-removed"), ], ) - def test_oidc_publisher_emails( + def test_trusted_publisher_emails( self, pyramid_request, pyramid_config, monkeypatch, fn, template_name ): stub_user = pretend.stub( diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -6244,7 +6244,7 @@ def test_add_github_oidc_publisher_created(self, monkeypatch): monkeypatch.setattr(views, "GitHubPublisherForm", github_publisher_form_cls) monkeypatch.setattr( views, - "send_oidc_publisher_added_email", + "send_trusted_publisher_added_email", pretend.call_recorder(lambda *a, **kw: None), ) @@ -6282,7 +6282,7 @@ def test_add_github_oidc_publisher_created(self, monkeypatch): ] assert request.db.add.calls == [pretend.call(project.oidc_publishers[0])] assert github_publisher_form_obj.validate.calls == [pretend.call()] - assert views.send_oidc_publisher_added_email.calls == [ + assert views.send_trusted_publisher_added_email.calls == [ pretend.call( request, fakeuser, @@ -6563,7 +6563,7 @@ def test_delete_oidc_publisher_registered_to_multiple_projects(self, monkeypatch monkeypatch.setattr(views, "DeletePublisherForm", delete_publisher_form_cls) monkeypatch.setattr( views, - "send_oidc_publisher_removed_email", + "send_trusted_publisher_removed_email", pretend.call_recorder(lambda *a, **kw: None), ) @@ -6610,7 +6610,7 @@ def test_delete_oidc_publisher_registered_to_multiple_projects(self, monkeypatch assert delete_publisher_form_cls.calls == [pretend.call(request.POST)] assert delete_publisher_form_obj.validate.calls == [pretend.call()] - assert views.send_oidc_publisher_removed_email.calls == [ + assert views.send_trusted_publisher_removed_email.calls == [ pretend.call( request, fakeuser, project_name="fakeproject", publisher=publisher ) @@ -6664,7 +6664,7 @@ def test_delete_oidc_publisher_entirely(self, monkeypatch): monkeypatch.setattr(views, "DeletePublisherForm", delete_publisher_form_cls) monkeypatch.setattr( views, - "send_oidc_publisher_removed_email", + "send_trusted_publisher_removed_email", pretend.call_recorder(lambda *a, **kw: None), ) @@ -6709,7 +6709,7 @@ def test_delete_oidc_publisher_entirely(self, monkeypatch): assert delete_publisher_form_cls.calls == [pretend.call(request.POST)] assert delete_publisher_form_obj.validate.calls == [pretend.call()] - assert views.send_oidc_publisher_removed_email.calls == [ + assert 
views.send_trusted_publisher_removed_email.calls == [ pretend.call( request, fakeuser, project_name="fakeproject", publisher=publisher ) diff --git a/tests/unit/oidc/test_views.py b/tests/unit/oidc/test_views.py --- a/tests/unit/oidc/test_views.py +++ b/tests/unit/oidc/test_views.py @@ -136,7 +136,7 @@ def body(self): assert isinstance(err["description"], str) -def test_mint_token_from_oidc_publisher_verify_jwt_signature_fails(): +def test_mint_token_from_trusted_publisher_verify_jwt_signature_fails(): oidc_service = pretend.stub( verify_jwt_signature=pretend.call_recorder(lambda token: None), ) @@ -166,7 +166,7 @@ def test_mint_token_from_oidc_publisher_verify_jwt_signature_fails(): assert oidc_service.verify_jwt_signature.calls == [pretend.call("faketoken")] -def test_mint_token_from_oidc_publisher_lookup_fails(): +def test_mint_token_from_trusted_publisher_lookup_fails(): claims = pretend.stub() oidc_service = pretend.stub( verify_jwt_signature=pretend.call_recorder(lambda token: claims), @@ -292,7 +292,7 @@ def test_mint_token_from_oidc_pending_publisher_ok( ] -def test_mint_token_from_pending_oidc_publisher_invalidates_others( +def test_mint_token_from_pending_trusted_publisher_invalidates_others( monkeypatch, db_request ): time = pretend.stub(time=pretend.call_recorder(lambda: 0)) @@ -320,13 +320,13 @@ def test_mint_token_from_pending_oidc_publisher_invalidates_others( ) emailed_users.append(user) - send_pending_oidc_publisher_invalidated_email = pretend.call_recorder( + send_pending_trusted_publisher_invalidated_email = pretend.call_recorder( lambda *a, **kw: None ) monkeypatch.setattr( views, - "send_pending_oidc_publisher_invalidated_email", - send_pending_oidc_publisher_invalidated_email, + "send_pending_trusted_publisher_invalidated_email", + send_pending_trusted_publisher_invalidated_email, ) db_request.registry.settings = {"warehouse.oidc.enabled": True} @@ -365,7 +365,7 @@ def test_mint_token_from_pending_oidc_publisher_invalidates_others( # We should have sent one invalidation email for each pending publisher that # was invalidated by the minting operation. - assert send_pending_oidc_publisher_invalidated_email.calls == [ + assert send_pending_trusted_publisher_invalidated_email.calls == [ pretend.call(db_request, emailed_users[0], project_name="does_not_exist"), pretend.call(db_request, emailed_users[1], project_name="does-not-exist"), pretend.call(db_request, emailed_users[2], project_name="dOeS-NoT-ExISt"),
[OIDC] Replace (most) user-facing references to "OIDC" with "Trusted publish{ers,ing}" This will more accurately reflect the terminology used in the PyPI docs and other public-facing resources. @webknjaz pointed out that the email templates, specifically, need updating.
@hugovk pointed out a few other places that need updating: * https://pypi.org/help/#openid-connect * https://pypi.org/manage/account/publishing/ * https://pypi.org/manage/project/my-project-name/settings/publishing/
2023-04-05T19:36:39Z
[]
[]
pypi/warehouse
13395
pypi__warehouse-13395
[ "12432" ]
f29cda4f0b0e402c8c36ab2058520cdae58dddbd
diff --git a/warehouse/api/billing.py b/warehouse/api/billing.py --- a/warehouse/api/billing.py +++ b/warehouse/api/billing.py @@ -12,7 +12,7 @@ import stripe -from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent, HTTPNotFound +from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent from pyramid.view import view_config from warehouse.subscriptions.interfaces import IBillingService, ISubscriptionService @@ -76,8 +76,6 @@ def handle_billing_webhook_event(request, event): subscription_service.update_subscription_status( id, StripeSubscriptionStatus.Canceled ) - else: - raise HTTPNotFound("Subscription not found") # Occurs whenever a subscription changes e.g. status changes. case "customer.subscription.updated": subscription = event["data"]["object"] @@ -94,8 +92,6 @@ def handle_billing_webhook_event(request, event): if id := subscription_service.find_subscriptionid(subscription_id): # Update subscription status. subscription_service.update_subscription_status(id, status) - else: - raise HTTPNotFound("Subscription not found") # Occurs whenever a customer is deleted. case "customer.deleted": customer = event["data"]["object"] @@ -105,8 +101,6 @@ def handle_billing_webhook_event(request, event): if subscription_service.get_subscriptions_by_customer(customer_id): # Delete the customer and all associated subscription data subscription_service.delete_customer(customer_id) - else: - raise HTTPNotFound("Customer subscription data not found") # Occurs whenever a customer is updated. case "customer.updated": customer = event["data"]["object"] diff --git a/warehouse/manage/views/organizations.py b/warehouse/manage/views/organizations.py --- a/warehouse/manage/views/organizations.py +++ b/warehouse/manage/views/organizations.py @@ -341,6 +341,14 @@ def save_organization(self): previous_organization_description=previous_organization_description, previous_organization_orgtype=previous_organization_orgtype, ) + if self.organization.customer is not None: + self.billing_service.update_customer( + self.organization.customer.customer_id, + self.organization.customer_name( + self.request.registry.settings["site.name"] + ), + self.organization.description, + ) self.request.session.flash("Organization details updated", queue="success") @@ -485,10 +493,8 @@ def __init__(self, organization, request): def customer_id(self): if self.organization.customer is None: customer = self.billing_service.create_customer( - name=( + name=self.organization.customer_name( self.request.registry.settings["site.name"] - + " Organization - " - + self.organization.name ), description=self.organization.description, ) diff --git a/warehouse/mock/billing.py b/warehouse/mock/billing.py --- a/warehouse/mock/billing.py +++ b/warehouse/mock/billing.py @@ -18,7 +18,6 @@ from pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther from pyramid.view import view_config, view_defaults -from warehouse.admin.flags import AdminFlagValue from warehouse.api.billing import handle_billing_webhook_event from warehouse.organizations.models import Organization from warehouse.subscriptions.interfaces import IBillingService @@ -36,9 +35,9 @@ class MockBillingViews: def __init__(self, organization, request): billing_service = request.find_service(IBillingService, context=None) - if request.flags.enabled( - AdminFlagValue.DISABLE_ORGANIZATIONS - ) or not isinstance(billing_service, MockStripeBillingService): + if not request.organization_access or not isinstance( + billing_service, MockStripeBillingService + ): raise HTTPNotFound 
self.organization = organization self.request = request diff --git a/warehouse/organizations/models.py b/warehouse/organizations/models.py --- a/warehouse/organizations/models.py +++ b/warehouse/organizations/models.py @@ -417,6 +417,9 @@ def active_subscription(self): else: return None + def customer_name(self, site_name="PyPI"): + return f"{site_name} Organization - {self.display_name} ({self.name})" + class OrganizationNameCatalog(db.Model): diff --git a/warehouse/subscriptions/interfaces.py b/warehouse/subscriptions/interfaces.py --- a/warehouse/subscriptions/interfaces.py +++ b/warehouse/subscriptions/interfaces.py @@ -34,6 +34,11 @@ def create_customer(name, description): Create the Customer resource via Billing API with the given name and description """ + def update_customer(customer_id, name, description): + """ + Update a Customer resource via Billing API with the given name and description + """ + def create_checkout_session(customer_id, price_ids, success_url, cancel_url): """ # Create new Checkout Session for the order diff --git a/warehouse/subscriptions/services.py b/warehouse/subscriptions/services.py --- a/warehouse/subscriptions/services.py +++ b/warehouse/subscriptions/services.py @@ -78,6 +78,13 @@ def create_customer(self, name, description): description=description, ) + def update_customer(self, customer_id, name, description): + return self.api.Customer.modify( + customer_id, + name=name, + description=description, + ) + def create_checkout_session(self, customer_id, price_ids, success_url, cancel_url): """ # Create new Checkout Session for the order
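Annotation on the patch above: a minimal standalone sketch (a plain class standing in for the ORM model, not the real `warehouse.organizations.models.Organization`) of the `customer_name` helper that `create_customer` and `update_customer` now share:

```python
# Minimal sketch: a stand-in for the ORM model, mirroring the helper
# added in warehouse/organizations/models.py by the patch above.
class Organization:
    def __init__(self, name: str, display_name: str, description: str = ""):
        self.name = name
        self.display_name = display_name
        self.description = description

    def customer_name(self, site_name: str = "PyPI") -> str:
        # One shared format string keeps the Stripe customer name
        # consistent between creation and later updates.
        return f"{site_name} Organization - {self.display_name} ({self.name})"


org = Organization("pypi", "The Python Package Index")
print(org.customer_name())             # PyPI Organization - The Python Package Index (pypi)
print(org.customer_name("Test PyPI"))  # Test PyPI Organization - The Python Package Index (pypi)
```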
diff --git a/tests/unit/api/test_billing.py b/tests/unit/api/test_billing.py --- a/tests/unit/api/test_billing.py +++ b/tests/unit/api/test_billing.py @@ -16,7 +16,7 @@ import pytest import stripe -from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent, HTTPNotFound +from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent from warehouse.api import billing @@ -238,7 +238,7 @@ def test_handle_billing_webhook_event_subscription_deleted_update( billing.handle_billing_webhook_event(db_request, event) - def test_handle_billing_webhook_event_subscription_deleted_not_found( + def test_handle_billing_webhook_event_subscription_deleted( self, db_request, subscription_service ): organization = OrganizationFactory.create() @@ -258,8 +258,7 @@ def test_handle_billing_webhook_event_subscription_deleted_not_found( }, } - with pytest.raises(HTTPNotFound): - billing.handle_billing_webhook_event(db_request, event) + billing.handle_billing_webhook_event(db_request, event) def test_handle_billing_webhook_event_subscription_deleted_invalid_status( self, db_request @@ -359,8 +358,7 @@ def test_handle_billing_webhook_event_subscription_updated_not_found( }, } - with pytest.raises(HTTPNotFound): - billing.handle_billing_webhook_event(db_request, event) + billing.handle_billing_webhook_event(db_request, event) def test_handle_billing_webhook_event_subscription_updated_no_change( self, db_request @@ -478,8 +476,7 @@ def test_handle_billing_webhook_event_customer_deleted_no_subscriptions( }, } - with pytest.raises(HTTPNotFound): - billing.handle_billing_webhook_event(db_request, event) + billing.handle_billing_webhook_event(db_request, event) def test_handle_billing_webhook_event_customer_deleted_invalid_customer( self, db_request diff --git a/tests/unit/manage/views/test_organizations.py b/tests/unit/manage/views/test_organizations.py --- a/tests/unit/manage/views/test_organizations.py +++ b/tests/unit/manage/views/test_organizations.py @@ -553,17 +553,28 @@ def test_manage_organization( ), ] - @pytest.mark.parametrize("orgtype", list(OrganizationType)) + @pytest.mark.parametrize( + ["orgtype", "has_customer"], + [(orgtype, True) for orgtype in list(OrganizationType)] + + [(orgtype, False) for orgtype in list(OrganizationType)], + ) def test_save_organization( self, db_request, pyramid_user, orgtype, + has_customer, + billing_service, organization_service, enable_organizations, monkeypatch, ): organization = OrganizationFactory.create(orgtype=orgtype) + customer = StripeCustomerFactory.create() + if has_customer: + OrganizationStripeCustomerFactory.create( + organization=organization, customer=customer + ) db_request.POST = { "display_name": organization.display_name, "link_url": organization.link_url, @@ -571,11 +582,18 @@ def test_save_organization( "orgtype": organization.orgtype, } + db_request.registry.settings["site.name"] = "PiePeaEye" + monkeypatch.setattr( organization_service, "update_organization", pretend.call_recorder(lambda *a, **kw: None), ) + monkeypatch.setattr( + billing_service, + "update_customer", + pretend.call_recorder(lambda stripe_customer_id, name, description: None), + ) save_organization_obj = pretend.stub( validate=lambda: True, data=db_request.POST @@ -598,6 +616,20 @@ def test_save_organization( assert organization_service.update_organization.calls == [ pretend.call(organization.id, **db_request.POST) ] + assert billing_service.update_customer.calls == ( + [ + pretend.call( + customer.customer_id, + ( + f"PiePeaEye Organization - {organization.display_name} " + 
f"({organization.name})" + ), + organization.description, + ) + ] + if has_customer + else [] + ) assert send_email.calls == [ pretend.call( db_request, diff --git a/tests/unit/mock/test_billing.py b/tests/unit/mock/test_billing.py --- a/tests/unit/mock/test_billing.py +++ b/tests/unit/mock/test_billing.py @@ -25,6 +25,7 @@ def organization(self): return OrganizationFactory.create() def test_disable_organizations(self, db_request, organization): + db_request.organization_access = False with pytest.raises(HTTPNotFound): billing.MockBillingViews(organization, db_request) diff --git a/tests/unit/organizations/test_models.py b/tests/unit/organizations/test_models.py --- a/tests/unit/organizations/test_models.py +++ b/tests/unit/organizations/test_models.py @@ -66,6 +66,19 @@ def test_traversal_cant_find(self, db_request): class TestOrganization: + def test_customer_name(self, db_session): + organization = DBOrganizationFactory.create( + name="pypi", display_name="The Python Package Index" + ) + assert ( + organization.customer_name() + == "PyPI Organization - The Python Package Index (pypi)" + ) + assert ( + organization.customer_name("Test PyPI") + == "Test PyPI Organization - The Python Package Index (pypi)" + ) + def test_acl(self, db_session): organization = DBOrganizationFactory.create() owner1 = DBOrganizationRoleFactory.create(organization=organization) diff --git a/tests/unit/subscriptions/test_services.py b/tests/unit/subscriptions/test_services.py --- a/tests/unit/subscriptions/test_services.py +++ b/tests/unit/subscriptions/test_services.py @@ -153,6 +153,26 @@ def test_create_customer(self, billing_service, organization_service): assert customer is not None assert customer["id"] + def test_update_customer(self, billing_service, organization_service): + organization = OrganizationFactory.create() + + customer = billing_service.create_customer( + name=organization.name, + description=organization.description, + ) + + assert customer is not None + assert customer["name"] == organization.name + + customer = billing_service.update_customer( + customer_id=customer["id"], + name="wutangClan", + description=organization.description, + ) + + assert customer is not None + assert customer["name"] == "wutangClan" + def test_create_checkout_session(self, billing_service, subscription_service): subscription_price = StripeSubscriptionPriceFactory.create() success_url = "http://what.ever"
Deleting an organization should add a warning When deleting an organization, a warning should let the owner know to download invoices/receipts from Stripe. It should also indicate what will happen to the subscription plan: whether the plan will be cancelled automatically, or whether the owner must cancel it before deleting the organization.
Seems we have two chances to communicate this, one in the "danger box" ![Image](https://user-images.githubusercontent.com/1200832/199565593-1cf42267-5528-43cd-8c71-34841fe4f2a3.png) and one in the confirm modal ![Image](https://user-images.githubusercontent.com/1200832/199565610-eb622cd8-c530-40ed-b4d9-a6f49a442e37.png) I'm not sure how hard it is to add text to the modal, but if it's possible I think this would be good to do for _both_. Priority - high
2023-04-08T16:02:52Z
[]
[]
pypi/warehouse
13460
pypi__warehouse-13460
[ "12965" ]
7340873aa5be3da62c4d3cfba273decd090cdd9e
diff --git a/docs/user/main.py b/docs/user/main.py --- a/docs/user/main.py +++ b/docs/user/main.py @@ -1,20 +1,6 @@ from pathlib import Path -OIDC_PUBLISHING = """ -!!! info - - Trusted publishing functionality is currently in closed beta. - - You can request access to the closed beta using - [this form](https://forms.gle/XUsRT8KTKy66TuUp7). - - **NOTE**: Access to the beta is provided on a *per-user* basis: users can - register trusted publishers against projects once added to the beta, but - other owners of the project can't modify trusted publisher settings unless - they're *also* in the beta. -""" - ORG_ACCOUNTS = """ !!! info @@ -25,7 +11,7 @@ to be one of the first to know how you can begin using them. """ -PREVIEW_FEATURES = {"oidc-publishing": OIDC_PUBLISHING, "org-accounts": ORG_ACCOUNTS} +PREVIEW_FEATURES = {"org-accounts": ORG_ACCOUNTS} _HERE = Path(__file__).parent.resolve() diff --git a/warehouse/accounts/models.py b/warehouse/accounts/models.py --- a/warehouse/accounts/models.py +++ b/warehouse/accounts/models.py @@ -198,11 +198,6 @@ def can_reset_password(self): ] ) - # XXX: Can be removed once OIDC is removed from beta. - @property - def in_oidc_beta(self): - return self.is_superuser or self.has_oidc_beta_access - def __acl__(self): return [ (Allow, "group:admins", "admin"), diff --git a/warehouse/accounts/views.py b/warehouse/accounts/views.py --- a/warehouse/accounts/views.py +++ b/warehouse/accounts/views.py @@ -24,7 +24,6 @@ HTTPSeeOther, HTTPTooManyRequests, ) -from pyramid.response import Response from pyramid.security import forget, remember from pyramid.view import view_config, view_defaults from sqlalchemy.exc import NoResultFound @@ -1376,9 +1375,6 @@ def manage_publishing(self): if not self.oidc_enabled: raise HTTPNotFound - if not self.request.user.in_oidc_beta: - return Response(status=403) - if self.request.flags.enabled(AdminFlagValue.DISALLOW_OIDC): self.request.session.flash( self.request._( @@ -1399,9 +1395,6 @@ def add_pending_github_oidc_publisher(self): if not self.oidc_enabled: raise HTTPNotFound - if not self.request.user.in_oidc_beta: - return Response(status=403) - if self.request.flags.enabled(AdminFlagValue.DISALLOW_OIDC): self.request.session.flash( self.request._( @@ -1536,9 +1529,6 @@ def delete_pending_oidc_publisher(self): if not self.oidc_enabled: raise HTTPNotFound - if not self.request.user.in_oidc_beta: - return Response(status=403) - if self.request.flags.enabled(AdminFlagValue.DISALLOW_OIDC): self.request.session.flash( self.request._( diff --git a/warehouse/admin/views/users.py b/warehouse/admin/views/users.py --- a/warehouse/admin/views/users.py +++ b/warehouse/admin/views/users.py @@ -92,7 +92,6 @@ class UserForm(forms.Form): is_superuser = wtforms.fields.BooleanField() is_moderator = wtforms.fields.BooleanField() is_psf_staff = wtforms.fields.BooleanField() - has_oidc_beta_access = wtforms.fields.BooleanField() prohibit_password_reset = wtforms.fields.BooleanField() hide_avatar = wtforms.fields.BooleanField() diff --git a/warehouse/manage/views/__init__.py b/warehouse/manage/views/__init__.py --- a/warehouse/manage/views/__init__.py +++ b/warehouse/manage/views/__init__.py @@ -1203,9 +1203,6 @@ def manage_project_oidc_publishers(self): if not self.oidc_enabled: raise HTTPNotFound - if not self.request.user.in_oidc_beta and not self.project.oidc_publishers: - return Response(status=403) - if self.request.flags.enabled(AdminFlagValue.DISALLOW_OIDC): self.request.session.flash( self.request._( @@ -1225,9 +1222,6 @@ def 
add_github_oidc_publisher(self): if not self.oidc_enabled: raise HTTPNotFound - if not self.request.user.in_oidc_beta: - return Response(status=403) - if self.request.flags.enabled(AdminFlagValue.DISALLOW_OIDC): self.request.session.flash( self.request._( @@ -1344,9 +1338,6 @@ def delete_oidc_publisher(self): if not self.oidc_enabled: raise HTTPNotFound - if not self.request.user.in_oidc_beta: - return Response(status=403) - if self.request.flags.enabled(AdminFlagValue.DISALLOW_OIDC): self.request.session.flash( (
diff --git a/tests/unit/accounts/test_models.py b/tests/unit/accounts/test_models.py --- a/tests/unit/accounts/test_models.py +++ b/tests/unit/accounts/test_models.py @@ -162,20 +162,3 @@ def test_acl(self, db_session): ("Allow", "group:admins", "admin"), ("Allow", "group:moderators", "moderator"), ] - - @pytest.mark.parametrize( - ("is_superuser", "has_oidc_beta_access", "in_oidc_beta"), - [ - (False, False, False), - (True, False, True), - (True, True, True), - (False, True, True), - ], - ) - def test_in_oidc_beta( - self, db_session, is_superuser, has_oidc_beta_access, in_oidc_beta - ): - user = DBUserFactory.create( - is_superuser=is_superuser, has_oidc_beta_access=has_oidc_beta_access - ) - assert user.in_oidc_beta == in_oidc_beta diff --git a/tests/unit/accounts/test_views.py b/tests/unit/accounts/test_views.py --- a/tests/unit/accounts/test_views.py +++ b/tests/unit/accounts/test_views.py @@ -25,7 +25,6 @@ HTTPSeeOther, HTTPTooManyRequests, ) -from pyramid.response import Response from sqlalchemy.exc import NoResultFound from webauthn.authentication.verify_authentication_response import ( VerifiedAuthentication, @@ -3003,9 +3002,7 @@ def find_service(iface, name=None, context=None): def test_manage_publishing(self, monkeypatch): metrics = pretend.stub() request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub( settings={ "warehouse.oidc.enabled": True, @@ -3059,23 +3056,8 @@ def test_manage_publishing_oidc_disabled(self): with pytest.raises(HTTPNotFound): view.manage_publishing() - def test_manage_publishing_not_in_beta(self): - request = pretend.stub( - user=pretend.stub(in_oidc_beta=False), - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - find_service=lambda *a, **kw: None, - ) - - view = views.ManageAccountPublishingViews(request) - resp = view.manage_publishing() - - assert isinstance(resp, Response) - assert resp.status_code == 403 - def test_manage_publishing_admin_disabled(self, monkeypatch, pyramid_request): - pyramid_request.user = pretend.stub( - in_oidc_beta=True, - ) + pyramid_request.user = pretend.stub() pyramid_request.registry = pretend.stub( settings={ "warehouse.oidc.enabled": True, @@ -3139,25 +3121,10 @@ def test_add_pending_github_oidc_publisher_oidc_disabled(self): with pytest.raises(HTTPNotFound): view.add_pending_github_oidc_publisher() - def test_add_pending_github_oidc_publisher_not_in_beta(self): - request = pretend.stub( - user=pretend.stub(in_oidc_beta=False), - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - find_service=lambda *a, **kw: None, - ) - - view = views.ManageAccountPublishingViews(request) - resp = view.add_pending_github_oidc_publisher() - - assert isinstance(resp, Response) - assert resp.status_code == 403 - def test_add_pending_github_oidc_publisher_admin_disabled( self, monkeypatch, pyramid_request ): - pyramid_request.user = pretend.stub( - in_oidc_beta=True, - ) + pyramid_request.user = pretend.stub() pyramid_request.registry = pretend.stub( settings={ "warehouse.oidc.enabled": True, @@ -3220,7 +3187,7 @@ def test_add_pending_github_oidc_publisher_user_cannot_register( } ) pyramid_request.user = pretend.stub( - has_primary_verified_email=False, in_oidc_beta=True + has_primary_verified_email=False, ) pyramid_request.flags = pretend.stub( enabled=pretend.call_recorder(lambda f: False) @@ -3278,7 +3245,7 @@ def test_add_pending_github_oidc_publisher_user_cannot_register( def test_add_pending_github_oidc_publisher_too_many_already( self, 
monkeypatch, db_request ): - db_request.user = UserFactory.create(has_oidc_beta_access=True) + db_request.user = UserFactory.create() EmailFactory(user=db_request.user, verified=True, primary=True) for i in range(3): pending_publisher = PendingGitHubPublisher( @@ -3342,7 +3309,6 @@ def test_add_pending_github_oidc_publisher_ratelimited( self, monkeypatch, pyramid_request ): pyramid_request.user = pretend.stub( - in_oidc_beta=True, has_primary_verified_email=True, pending_oidc_publishers=[], ) @@ -3397,7 +3363,6 @@ def test_add_pending_github_oidc_publisher_invalid_form( self, monkeypatch, pyramid_request ): pyramid_request.user = pretend.stub( - in_oidc_beta=True, has_primary_verified_email=True, pending_oidc_publishers=[], ) @@ -3460,7 +3425,7 @@ def test_add_pending_github_oidc_publisher_invalid_form( def test_add_pending_github_oidc_publisher_already_exists( self, monkeypatch, db_request ): - db_request.user = UserFactory.create(has_oidc_beta_access=True) + db_request.user = UserFactory.create() EmailFactory(user=db_request.user, verified=True, primary=True) pending_publisher = PendingGitHubPublisher( project_name="some-project-name", @@ -3534,7 +3499,7 @@ def test_add_pending_github_oidc_publisher_already_exists( ] def test_add_pending_github_oidc_publisher(self, monkeypatch, db_request): - db_request.user = UserFactory(has_oidc_beta_access=True) + db_request.user = UserFactory() db_request.user.record_event = pretend.call_recorder(lambda **kw: None) EmailFactory(user=db_request.user, verified=True, primary=True) db_request.registry = pretend.stub( @@ -3628,25 +3593,10 @@ def test_delete_pending_oidc_publisher_oidc_disabled(self): with pytest.raises(HTTPNotFound): view.delete_pending_oidc_publisher() - def test_delete_pending_oidc_publisher_not_in_beta(self): - request = pretend.stub( - user=pretend.stub(in_oidc_beta=False), - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - find_service=lambda *a, **kw: None, - ) - - view = views.ManageAccountPublishingViews(request) - resp = view.delete_pending_oidc_publisher() - - assert isinstance(resp, Response) - assert resp.status_code == 403 - def test_delete_pending_oidc_publisher_admin_disabled( self, monkeypatch, pyramid_request ): - pyramid_request.user = pretend.stub( - in_oidc_beta=True, - ) + pyramid_request.user = pretend.stub() pyramid_request.registry = pretend.stub( settings={ "warehouse.oidc.enabled": True, @@ -3702,9 +3652,7 @@ def test_delete_pending_oidc_publisher_admin_disabled( def test_delete_pending_oidc_publisher_invalid_form( self, monkeypatch, pyramid_request ): - pyramid_request.user = pretend.stub( - in_oidc_beta=True, - ) + pyramid_request.user = pretend.stub() pyramid_request.registry = pretend.stub( settings={"warehouse.oidc.enabled": True} ) @@ -3735,7 +3683,7 @@ def test_delete_pending_oidc_publisher_invalid_form( ] def test_delete_pending_oidc_publisher_not_found(self, monkeypatch, db_request): - db_request.user = UserFactory.create(has_oidc_beta_access=True) + db_request.user = UserFactory.create() pending_publisher = PendingGitHubPublisher( project_name="some-project-name", repository_name="some-repository", @@ -3786,9 +3734,7 @@ def test_delete_pending_oidc_publisher_no_access(self, monkeypatch, db_request): db_request.db.add(pending_publisher) db_request.db.flush() # To get the id - db_request.user = pretend.stub( - in_oidc_beta=True, - ) + db_request.user = pretend.stub() db_request.registry = pretend.stub(settings={"warehouse.oidc.enabled": True}) db_request.flags = 
pretend.stub(enabled=pretend.call_recorder(lambda f: False)) db_request.session = pretend.stub( @@ -3816,7 +3762,7 @@ def test_delete_pending_oidc_publisher_no_access(self, monkeypatch, db_request): assert db_request.db.query(PendingGitHubPublisher).all() == [pending_publisher] def test_delete_pending_oidc_publisher(self, monkeypatch, db_request): - db_request.user = UserFactory.create(has_oidc_beta_access=True) + db_request.user = UserFactory.create() pending_publisher = PendingGitHubPublisher( project_name="some-project-name", repository_name="some-repository", diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -5963,9 +5963,7 @@ def find_service(iface, name=None, context=None): def test_manage_project_oidc_publishers(self, monkeypatch): project = pretend.stub(oidc_publishers=[]) request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub( settings={ "warehouse.oidc.enabled": True, @@ -6001,9 +5999,7 @@ def test_manage_project_oidc_publishers_admin_disabled( self, monkeypatch, pyramid_request ): project = pretend.stub(oidc_publishers=[]) - pyramid_request.user = pretend.stub( - in_oidc_beta=True, - ) + pyramid_request.user = pretend.stub() pyramid_request.registry = pretend.stub( settings={ "warehouse.oidc.enabled": True, @@ -6061,21 +6057,6 @@ def test_manage_project_oidc_publishers_oidc_not_enabled(self): with pytest.raises(HTTPNotFound): view.manage_project_oidc_publishers() - def test_manage_project_oidc_publishers_not_in_beta(self): - project = pretend.stub(oidc_publishers=[]) - request = pretend.stub( - user=pretend.stub( - in_oidc_beta=False, - ), - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - find_service=lambda *a, **kw: None, - ) - - view = views.ManageOIDCPublisherViews(project, request) - resp = view.manage_project_oidc_publishers() - assert isinstance(resp, Response) - assert resp.status_code == 403 - def test_add_github_oidc_publisher_preexisting(self, monkeypatch): publisher = pretend.stub( id="fakeid", @@ -6101,7 +6082,6 @@ def test_add_github_oidc_publisher_preexisting(self, monkeypatch): request = pretend.stub( user=pretend.stub( - in_oidc_beta=True, username="some-user", ), registry=pretend.stub( @@ -6189,7 +6169,6 @@ def test_add_github_oidc_publisher_created(self, monkeypatch): request = pretend.stub( user=pretend.stub( - in_oidc_beta=True, username="some-user", ), registry=pretend.stub( @@ -6284,7 +6263,7 @@ def test_add_github_oidc_publisher_created(self, monkeypatch): def test_add_github_oidc_publisher_already_registered_with_project( self, monkeypatch, db_request ): - db_request.user = UserFactory.create(has_oidc_beta_access=True) + db_request.user = UserFactory.create() EmailFactory(user=db_request.user, verified=True, primary=True) publisher = GitHubPublisher( repository_name="some-repository", @@ -6363,9 +6342,7 @@ def test_add_github_oidc_publisher_ratelimited(self, monkeypatch): metrics = pretend.stub(increment=pretend.call_recorder(lambda *a, **kw: None)) request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub( settings={ "warehouse.oidc.enabled": True, @@ -6411,27 +6388,10 @@ def test_add_github_oidc_publisher_oidc_not_enabled(self): with pytest.raises(HTTPNotFound): view.add_github_oidc_publisher() - def test_add_github_oidc_publisher_not_in_beta(self): - project = pretend.stub() - request = pretend.stub( - 
user=pretend.stub( - in_oidc_beta=False, - ), - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - find_service=lambda *a, **kw: None, - ) - - view = views.ManageOIDCPublisherViews(project, request) - resp = view.add_github_oidc_publisher() - assert isinstance(resp, Response) - assert resp.status_code == 403 - def test_add_github_oidc_publisher_admin_disabled(self, monkeypatch): project = pretend.stub() request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), find_service=lambda *a, **kw: None, flags=pretend.stub(enabled=pretend.call_recorder(lambda f: True)), @@ -6460,9 +6420,7 @@ def test_add_github_oidc_publisher_invalid_form(self, monkeypatch): project = pretend.stub() metrics = pretend.stub(increment=pretend.call_recorder(lambda *a, **kw: None)) request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), find_service=lambda *a, **kw: metrics, flags=pretend.stub(enabled=pretend.call_recorder(lambda f: False)), @@ -6503,7 +6461,7 @@ def test_add_github_oidc_publisher_invalid_form(self, monkeypatch): def test_delete_oidc_publisher_registered_to_multiple_projects( self, monkeypatch, db_request ): - db_request.user = UserFactory.create(has_oidc_beta_access=True) + db_request.user = UserFactory.create() EmailFactory(user=db_request.user, verified=True, primary=True) publisher = GitHubPublisher( repository_name="some-repository", @@ -6595,7 +6553,7 @@ def test_delete_oidc_publisher_registered_to_multiple_projects( ] def test_delete_oidc_publisher_entirely(self, monkeypatch, db_request): - db_request.user = UserFactory.create(has_oidc_beta_access=True) + db_request.user = UserFactory.create() EmailFactory(user=db_request.user, verified=True, primary=True) publisher = GitHubPublisher( repository_name="some-repository", @@ -6689,9 +6647,7 @@ def test_delete_oidc_publisher_invalid_form(self, monkeypatch): project = pretend.stub(oidc_publishers=[publisher]) metrics = pretend.stub(increment=pretend.call_recorder(lambda *a, **kw: None)) request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), find_service=lambda *a, **kw: metrics, flags=pretend.stub(enabled=pretend.call_recorder(lambda f: False)), @@ -6742,9 +6698,7 @@ def test_delete_oidc_publisher_not_found(self, monkeypatch, other_publisher): ) metrics = pretend.stub(increment=pretend.call_recorder(lambda *a, **kw: None)) request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), find_service=lambda *a, **kw: metrics, flags=pretend.stub(enabled=pretend.call_recorder(lambda f: False)), @@ -6801,27 +6755,10 @@ def test_delete_oidc_publisher_oidc_not_enabled(self): with pytest.raises(HTTPNotFound): view.delete_oidc_publisher() - def test_add_delete_oidc_publisher_not_in_beta(self): - project = pretend.stub() - request = pretend.stub( - user=pretend.stub( - in_oidc_beta=False, - ), - registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), - find_service=lambda *a, **kw: None, - ) - - view = views.ManageOIDCPublisherViews(project, request) - resp = view.delete_oidc_publisher() - assert isinstance(resp, Response) - assert resp.status_code == 403 - def test_delete_oidc_publisher_admin_disabled(self, monkeypatch): project = 
pretend.stub() request = pretend.stub( - user=pretend.stub( - in_oidc_beta=True, - ), + user=pretend.stub(), registry=pretend.stub(settings={"warehouse.oidc.enabled": True}), find_service=lambda *a, **kw: None, flags=pretend.stub(enabled=pretend.call_recorder(lambda f: True)),
Closed beta for OIDC publishing Once #12915 is merged, we should be in a good place to start a closed beta for OIDC publishing. I'll use this issue to track adding the feature flags/ACL checks needed for that.
Also needed for a closed beta: admin app changes to allow admins to manage pending/normal OIDC providers. Edit: #13053. CC @cedricvanrompay-datadog and @webknjaz as two early testers. CC @alex as well, since I know you're interested 😉 /register Sign me up! In order to cut down on noise here, anyone interested, please fill out https://forms.gle/XUsRT8KTKy66TuUp7 and we'll be in touch once this is ready.
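For reference, a minimal sketch (simplified stand-ins, not the real Pyramid wiring) of the per-user gate this PR removes: during the closed beta, each publishing view answered HTTP 403 unless the user was a superuser or had `has_oidc_beta_access` set, exactly as in the diff above.

```python
from pyramid.response import Response


class User:
    # Simplified stand-in for warehouse.accounts.models.User.
    def __init__(self, is_superuser=False, has_oidc_beta_access=False):
        self.is_superuser = is_superuser
        self.has_oidc_beta_access = has_oidc_beta_access

    @property
    def in_oidc_beta(self):
        # The property deleted by this PR.
        return self.is_superuser or self.has_oidc_beta_access


def manage_publishing(request):
    # The guard each publishing view carried during the closed beta;
    # removing it opens trusted publishing to all users.
    if not request.user.in_oidc_beta:
        return Response(status=403)
    return Response("publishing management page")
```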
2023-04-20T17:30:57Z
[]
[]
pypi/warehouse
13474
pypi__warehouse-13474
[ "13471" ]
9a802ea7ad4b75e1c951b75c8a58f026ca7dd86f
diff --git a/warehouse/config.py b/warehouse/config.py --- a/warehouse/config.py +++ b/warehouse/config.py @@ -458,6 +458,9 @@ def configure(settings=None): filters.setdefault("is_recent", "warehouse.filters:is_recent") filters.setdefault("canonicalize_name", "packaging.utils:canonicalize_name") filters.setdefault("format_author_email", "warehouse.filters:format_author_email") + filters.setdefault( + "remove_invalid_xml_unicode", "warehouse.filters:remove_invalid_xml_unicode" + ) # We also want to register some global functions for Jinja jglobals = config.get_settings().setdefault("jinja2.globals", {}) diff --git a/warehouse/filters.py b/warehouse/filters.py --- a/warehouse/filters.py +++ b/warehouse/filters.py @@ -188,5 +188,15 @@ def format_author_email(metadata_email: str) -> tuple[str, str]: return author_emails[0][0], author_emails[0][1] +def remove_invalid_xml_unicode(value: str) -> str: + """ + Remove invalid unicode characters from a string. + Useful for XML Templates. + + Ref: https://www.w3.org/TR/REC-xml/#NT-Char + """ + return "".join(c for c in value if ord(c) >= 32) + + def includeme(config): config.add_request_method(_camo_url, name="camo_url")
diff --git a/tests/unit/test_filters.py b/tests/unit/test_filters.py --- a/tests/unit/test_filters.py +++ b/tests/unit/test_filters.py @@ -267,3 +267,20 @@ def test_format_author_email(meta_email, expected_name, expected_email): author_name, author_email = filters.format_author_email(meta_email) assert author_name == expected_name assert author_email == expected_email + + [email protected]( + ("inp", "expected"), + [ + ("foo", "foo"), # no change + (" foo  bar ", " foo bar "), # U+001B : <control> ESCAPE [ESC] + ("foo \x1b bar", "foo bar"), # U+001B : <control> ESCAPE [ESC] + ("foo \x00 bar", "foo bar"), # U+0000 : <control> NULL + ("foo 🐍 bar", "foo 🐍 bar"), # U+1F40D : SNAKE [snake] (emoji) [Python] + ], +) +def test_remove_invalid_xml_unicode(inp, expected): + """ + Test that invalid XML unicode characters are removed. + """ + assert filters.remove_invalid_xml_unicode(inp) == expected
Invalid Unicode character in package name causing parsing error when trying to parse the RSS feeds **Describe the bug** When we tried to parse the [newest packages feed](https://warehouse.pypa.io/api-reference/feeds.html#newest-packages-feed) this morning, Python raised an XML parsing exception because there was an invalid Unicode character in the project description of <https://pypi.org/project/dj-test-queries/>. **Expected behavior** The RSS feed should not include invalid Unicode characters. **To Reproduce** Publish a package with invalid Unicode characters in the description and then attempt to parse the newest packages feed. **My Platform** We're using https://pypi.org/project/aiohttp/ to read the feed and https://pypi.org/project/xmltodict/ to dump it to a `dict` for easier processing.
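To make the failure mode concrete, a small sketch using the stdlib parser (the reporter used aiohttp/xmltodict, but any compliant XML parser rejects the same input): control characters such as ESC fall outside the XML 1.0 character range, and the filter added by this PR strips them before templating.

```python
import xml.etree.ElementTree as ET

# ESC (U+001B) is not a valid XML 1.0 character, so parsing fails.
try:
    ET.fromstring("<description>broken \x1b text</description>")
except ET.ParseError as exc:
    print(f"parse failed: {exc}")

# The filter added in warehouse/filters.py drops code points below U+0020:
def remove_invalid_xml_unicode(value: str) -> str:
    return "".join(c for c in value if ord(c) >= 32)

cleaned = remove_invalid_xml_unicode("broken \x1b text")
print(ET.fromstring(f"<description>{cleaned}</description>").text)
```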
2023-04-21T22:55:45Z
[]
[]
pypi/warehouse
13488
pypi__warehouse-13488
[ "13487" ]
fb439afb88a7c7b70726a672a313513fb582e338
diff --git a/warehouse/manage/views/organizations.py b/warehouse/manage/views/organizations.py --- a/warehouse/manage/views/organizations.py +++ b/warehouse/manage/views/organizations.py @@ -558,7 +558,9 @@ def manage_subscription(self): renderer="manage/organization/activate_subscription.html", ) def activate_subscription(self): - return {"organization": self.organization} + # We're not ready for companies to activate their own subscriptions yet. + raise HTTPNotFound() + # return {"organization": self.organization} @view_config(route_name="manage.organization.subscription") def create_or_manage_subscription(self):
diff --git a/tests/unit/manage/views/test_organizations.py b/tests/unit/manage/views/test_organizations.py --- a/tests/unit/manage/views/test_organizations.py +++ b/tests/unit/manage/views/test_organizations.py @@ -1097,9 +1097,14 @@ def test_activate_subscription( enable_organizations, ): view = org_views.ManageOrganizationBillingViews(organization, db_request) - result = view.activate_subscription() - assert result == {"organization": organization} + # We're not ready for companies to activate their own subscriptions yet. + with pytest.raises(HTTPNotFound): + assert view.activate_subscription() + + # result = view.activate_subscription() + + # assert result == {"organization": organization} def test_create_subscription( self,
[UX+bug] UI for non-activated organizations is clickable where it shouldn't be: disabled `[Activate Billing]` can lead to HTTP 500 **Describe the bug** This is similar to https://github.com/pypi/warehouse/issues/10773. A disabled button is clickable for a non-reviewed/non-approved org, and it allows navigating to a "forbidden" page that apparently raises a traceback internally. **Expected behavior** 1. The button shouldn't be clickable. 2. The activation page shouldn't render HTTP 500 but rather HTTP 401/403. **To Reproduce** 1. Create an organization at https://pypi.org/manage/organizations/ 2. Click on a "disabled" `[Activate Billing]` button. 3. Get surprised it's clickable. 4. Click on the `[Activate Subscription]` button. 5. See an HTTP 500 page implying that PyPI is down. **My Platform** Chrome on Android, probably unrelated. **Additional context** ![Screenshot_2023-04-23-14-03-34-54_40deb401b9ffe8e1df2f1cc5ba480b12](https://user-images.githubusercontent.com/578543/233863544-135ada22-ab7e-4a5a-85bf-a22cdab6201a.jpg) ![Screenshot_2023-04-23-14-04-44-17_40deb401b9ffe8e1df2f1cc5ba480b12](https://user-images.githubusercontent.com/578543/233863546-61f1e993-e303-40b7-85b5-31ceb88c1e1b.jpg)
Thanks! We changed the visibility to disabled, but didn't actually disable the button. Yeah, but I suppose there are two bugs, the second one being the ability to navigate to the nested URLs behind that button. Agreed - but I think disabling the button AND href will be enough for now until we reinstate billing.
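To illustrate the front-end half of that suggestion (hypothetical template and class names, not warehouse's actual markup): styling an anchor as disabled doesn't stop navigation; the href has to go too.

```python
from jinja2 import Template

# Hypothetical markup: a link merely *styled* as disabled keeps its href,
# so the browser still navigates to the nested URL on click.
styled_only = Template(
    '<a class="button {{ "button--disabled" if not approved }}" '
    'href="{{ url }}">Activate Billing</a>'
)
print(styled_only.render(approved=False, url="/subscription/activate/"))

# Actually disabling means dropping the href (or the anchor entirely),
# in addition to the server-side 404 the patch above adds.
guarded = Template(
    '{% if approved %}<a class="button" href="{{ url }}">Activate Billing</a>'
    '{% else %}<button class="button" disabled>Activate Billing</button>{% endif %}'
)
print(guarded.render(approved=False, url="/subscription/activate/"))
```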
2023-04-23T20:58:16Z
[]
[]
pypi/warehouse
13500
pypi__warehouse-13500
[ "12920" ]
e1dc362132e7acefe805e12f224e79bffca4bf40
diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -25,6 +25,7 @@ import packaging.specifiers import packaging.utils import packaging.version +import packaging_legacy.version import pkg_resources import requests import wtforms @@ -214,10 +215,10 @@ def _exc_with_message(exc, message, **kwargs): def _validate_pep440_version(form, field): - parsed = packaging.version.parse(field.data) - # Check that this version is a valid PEP 440 version at all. - if not isinstance(parsed, packaging.version.Version): + try: + parsed = packaging.version.parse(field.data) + except packaging.version.InvalidVersion: raise wtforms.validators.ValidationError( "Start and end with a letter or numeral containing only " "ASCII numeric and '.', '_' and '-'." @@ -1163,7 +1164,7 @@ def file_upload(request): .all() ) for i, r in enumerate( - sorted(releases, key=lambda x: packaging.version.parse(x.version)) + sorted(releases, key=lambda x: packaging_legacy.version.parse(x.version)) ): r._pypi_ordering = i
diff --git a/requirements/tests.in b/requirements/tests.in --- a/requirements/tests.in +++ b/requirements/tests.in @@ -7,5 +7,3 @@ pytest-postgresql>=3.1.3,<4.0.0 pytest-socket responses>=0.5.1 webtest -# Until resolved https://github.com/googleapis/python-bigquery/issues/1435 -packaging<22.0 diff --git a/requirements/tests.txt b/requirements/tests.txt --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -183,12 +183,10 @@ mirakuru==2.5.1 \ --hash=sha256:0a16f897841741f8cd784f790e54d74e61456ba36be9cb9de731b49e2e7a45dc \ --hash=sha256:5a60d641fa92c8bfcd383f6e52f7a0bf3f081da0467fc6e3e6a3f6b3e3e47a7b # via pytest-postgresql -packaging==21.3 \ - --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ - --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 - # via - # -r requirements/tests.in - # pytest +packaging==23.1 \ + --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \ + --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f + # via pytest pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 @@ -217,10 +215,6 @@ psutil==5.9.4 \ --hash=sha256:efeae04f9516907be44904cc7ce08defb6b665128992a56957abc9b61dca94b7 \ --hash=sha256:fd8522436a6ada7b4aad6638662966de0d61d241cb821239b2ae7013d41a43d4 # via mirakuru -pyparsing==3.0.9 \ - --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ - --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc - # via packaging pytest==7.2.2 \ --hash=sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e \ --hash=sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4
Unpin constraint on `packaging` This was constrained in https://github.com/pypi/warehouse/pull/12664/ due to https://github.com/googleapis/python-bigquery/issues/1435, which is now resolved: https://github.com/pypi/warehouse/blob/186180c600da5a99a7bdc0da548f852351d91e84/requirements/deploy.in#L4 We attempted to unconstrain it in https://github.com/pypi/warehouse/pull/12871; however, due to https://github.com/pypa/packaging/pull/407, we can't migrate to the latest version until https://github.com/pypa/packaging/pull/669 is resolved.
This now needs a release of `packaging` which includes https://github.com/pypa/packaging/pull/669, a release of https://github.com/di/packaging_legacy, and an update here to use that dependency.
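A small sketch of the behavioural difference driving this (assuming `packaging>=22` and di's `packaging_legacy` are installed): strict parsing now raises where it used to fall back to `LegacyVersion`, so the legacy behaviour is imported separately for sorting old releases, as the patch above does.

```python
import packaging.version
import packaging_legacy.version  # di/packaging_legacy, per the hint above

# packaging 22+ removed LegacyVersion: non-PEP 440 strings now raise.
try:
    packaging.version.parse("not-a-version")
except packaging.version.InvalidVersion as exc:
    print(f"rejected by packaging>=22: {exc}")

# packaging_legacy keeps the permissive parse, which the upload view now
# uses when sorting releases to compute _pypi_ordering.
for v in sorted(["2.0", "not-a-version", "1.0"], key=packaging_legacy.version.parse):
    print(v)
```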
2023-04-24T19:19:51Z
[]
[]
pypi/warehouse
13514
pypi__warehouse-13514
[ "13463" ]
cb7bfac2172ca7db91251afd8b5ef50873ad7bc0
diff --git a/warehouse/manage/views/__init__.py b/warehouse/manage/views/__init__.py --- a/warehouse/manage/views/__init__.py +++ b/warehouse/manage/views/__init__.py @@ -1320,7 +1320,7 @@ def add_github_oidc_publisher(self): ) self.request.session.flash( - f"Added {publisher} to {self.project.name}", + f"Added {publisher} in {publisher.publisher_url} to {self.project.name}", queue="success", ) diff --git a/warehouse/oidc/models.py b/warehouse/oidc/models.py --- a/warehouse/oidc/models.py +++ b/warehouse/oidc/models.py @@ -330,16 +330,14 @@ def _workflow_slug(self): def publisher_name(self): return "GitHub" - @property - def publisher_url(self): - # NOTE: Until we embed the SHA, this URL is not guaranteed to contain - # the exact contents of the workflow that their OIDC publisher corresponds to. - return f"https://github.com/{self.repository}/blob/HEAD/{self._workflow_slug}" - @property def repository(self): return f"{self.repository_owner}/{self.repository_name}" + @property + def publisher_url(self): + return f"https://github.com/{self.repository}" + @property def job_workflow_ref(self): return f"{self.repository}/{self._workflow_slug}" @@ -349,7 +347,7 @@ def sub(self): return f"repo:{self.repository}" def __str__(self): - return f"{self.workflow_filename} @ {self.repository}" + return self.workflow_filename class GitHubPublisher(GitHubPublisherMixin, OIDCPublisher): diff --git a/warehouse/oidc/views.py b/warehouse/oidc/views.py --- a/warehouse/oidc/views.py +++ b/warehouse/oidc/views.py @@ -187,7 +187,7 @@ def _invalid(errors): serialized, dm = macaroon_service.create_macaroon( request.domain, ( - f"OpenID token: {publisher.publisher_url} " + f"OpenID token: {str(publisher)} " f"({datetime.fromtimestamp(not_before).isoformat()})" ), [
diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -6062,10 +6062,10 @@ def test_add_github_oidc_publisher_preexisting(self, monkeypatch): id="fakeid", publisher_name="GitHub", repository_name="fakerepo", + publisher_url="https://github.com/fakeowner/fakerepo", owner="fakeowner", owner_id="1234", workflow_filename="fakeworkflow.yml", - publisher_url="some-url", environment="some-environment", ) # NOTE: Can't set __str__ using pretend.stub() @@ -6139,14 +6139,17 @@ def test_add_github_oidc_publisher_preexisting(self, monkeypatch): "publisher": "GitHub", "id": "fakeid", "specifier": "fakespecifier", - "url": "some-url", + "url": "https://github.com/fakeowner/fakerepo", "submitted_by": "some-user", }, ) ] assert request.session.flash.calls == [ pretend.call( - "Added fakespecifier to fakeproject", + ( + "Added fakespecifier in https://github.com/fakeowner/fakerepo " + "to fakeproject" + ), queue="success", ) ] @@ -6231,18 +6234,18 @@ def test_add_github_oidc_publisher_created(self, monkeypatch): additional={ "publisher": "GitHub", "id": "fakeid", - "specifier": "fakeworkflow.yml @ fakeowner/fakerepo", - "url": ( - "https://github.com/fakeowner/fakerepo/blob/HEAD/" - ".github/workflows/fakeworkflow.yml" - ), + "specifier": "fakeworkflow.yml", + "url": "https://github.com/fakeowner/fakerepo", "submitted_by": "some-user", }, ) ] assert request.session.flash.calls == [ pretend.call( - "Added fakeworkflow.yml @ fakeowner/fakerepo to fakeproject", + ( + "Added fakeworkflow.yml in https://github.com/fakeowner/fakerepo " + "to fakeproject" + ), queue="success", ) ] diff --git a/tests/unit/oidc/test_models.py b/tests/unit/oidc/test_models.py --- a/tests/unit/oidc/test_models.py +++ b/tests/unit/oidc/test_models.py @@ -89,12 +89,8 @@ def test_github_publisher_computed_properties(self): for claim_name in publisher.__required_verifiable_claims__.keys(): assert getattr(publisher, claim_name) is not None - assert str(publisher) == "fakeworkflow.yml @ fakeowner/fakerepo" - assert ( - publisher.publisher_url - == "https://github.com/fakeowner/fakerepo/blob/HEAD/" - f".github/workflows/{publisher.workflow_filename}" - ) + assert str(publisher) == "fakeworkflow.yml" + assert publisher.publisher_url == "https://github.com/fakeowner/fakerepo" def test_github_publisher_unaccounted_claims(self, monkeypatch): publisher = models.GitHubPublisher( diff --git a/tests/unit/oidc/test_views.py b/tests/unit/oidc/test_views.py --- a/tests/unit/oidc/test_views.py +++ b/tests/unit/oidc/test_views.py @@ -440,7 +440,7 @@ def find_service(iface, **kw): assert macaroon_service.create_macaroon.calls == [ pretend.call( "fakedomain", - f"OpenID token: https://fake/url ({datetime.fromtimestamp(0).isoformat()})", + f"OpenID token: fakespecifier ({datetime.fromtimestamp(0).isoformat()})", [ caveats.OIDCPublisher(oidc_publisher_id="fakepublisherid"), caveats.ProjectID(project_ids=["fakeprojectid"]),
URL to trusted publisher workflow is confusing As discussed [here](https://twitter.com/HEPfeickert/status/1649181900521472001), the URL in the list of trusted publishers is a little confusing to users because it links to a workflow on a specific branch (`HEAD`), when in reality any matching workflow on any branch in the repository can publish. Instead of a full URL to the workflow, we should display the name of the org/repo (with a link) and the filename of the workflow (without a link).
Relevant code: https://github.com/pypi/warehouse/blob/99dfe5397a9e22a452139c9c69fd58ad76ced5c3/warehouse/templates/manage/manage_base.html#L510-L519 https://github.com/pypi/warehouse/blob/91c36dd98671ac39bd4a488cb407264bc645ea0a/warehouse/oidc/models.py#L325-L329 I'm not sure I understand the comment - how does a SHA get embedded, and how might it be used? Could we convert to use the repr of the publisher? We already have this method: https://github.com/pypi/warehouse/blob/91c36dd98671ac39bd4a488cb407264bc645ea0a/warehouse/oidc/models.py#L343-L344 Otherwise we'd have to construct some other string for this purpose, since we preserve the `publisher_url` in Events (unless we don't care about the `HEAD` aspect there either?) > I'm not sure I understand the comment - how does a SHA get embedded, and how might it be used? I'm not sure I understand the comment either: @woodruffw, any thoughts? > Otherwise we'd have to construct some other string for this purpose, since we preserve the publisher_url in Events (unless we don't care about the HEAD aspect there either?) We should probably update this to be the _actual_ URL to the workflow that _actually_ published the file (which we can construct from the OIDC claims) which will likely include the SHA, which might be what @woodruffw was talking about in that comment. > I'm not sure I understand the comment either: @woodruffw, any thoughts? Yeah, that's a misleading comment on my part: that was referring to somehow embedding the OIDC token's `ref` claim in various publisher management events, but that's currently not possible due to how we disconnect the state during token exchange. > Otherwise we'd have to construct some other string for this purpose, since we preserve the publisher_url in Events (unless we don't care about the HEAD aspect there either?) Correct, this is what the comment was referring to. IMO it'd be fine to remove the `publisher_url` from both, and replace both with individual fields for the components.
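A minimal sketch of the display logic after this change (a plain class standing in for the ORM model, mirroring the patch above): the publisher URL points at the repository root, and the string form is just the workflow filename, so the UI no longer implies a branch-pinned link.

```python
class GitHubPublisherSketch:
    # Stand-in for warehouse.oidc.models.GitHubPublisher.
    def __init__(self, owner: str, repo: str, workflow_filename: str):
        self.repository_owner = owner
        self.repository_name = repo
        self.workflow_filename = workflow_filename

    @property
    def repository(self) -> str:
        return f"{self.repository_owner}/{self.repository_name}"

    @property
    def publisher_url(self) -> str:
        # Repository root: no HEAD-pinned workflow link.
        return f"https://github.com/{self.repository}"

    def __str__(self) -> str:
        return self.workflow_filename


p = GitHubPublisherSketch("fakeowner", "fakerepo", "release.yml")
# Matches the flash message format in the patch:
print(f"Added {p} in {p.publisher_url} to fakeproject")
```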
2023-04-25T17:39:41Z
[]
[]
pypi/warehouse
13606
pypi__warehouse-13606
[ "9660", "11526", "11380" ]
da6a5281a7318284feff22c42d70b375f3de1cbb
diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -59,6 +59,7 @@ Dependency, DependencyKind, Description, + DynamicFieldsEnum, File, Filename, JournalEntry, @@ -395,6 +396,37 @@ def _validate_classifiers(form, field): ) +def _validate_dynamic(_form, field): + declared_dynamic_fields = {str.title(k) for k in field.data or []} + disallowed_dynamic_fields = {"Name", "Version", "Metadata-Version"} + if invalid := (declared_dynamic_fields & disallowed_dynamic_fields): + raise wtforms.validators.ValidationError( + f"The following metadata field(s) are valid, " + f"but cannot be marked as dynamic: {invalid!r}", + ) + allowed_dynamic_fields = set(DynamicFieldsEnum.enums) + if invalid := (declared_dynamic_fields - allowed_dynamic_fields): + raise wtforms.validators.ValidationError( + f"The following metadata field(s) are not valid " + f"and cannot be marked as dynamic: {invalid!r}" + ) + + +_extra_name_re = re.compile("^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") + + +def _validate_provides_extras(form, field): + metadata_version = packaging.version.Version(form.metadata_version.data) + + if metadata_version >= packaging.version.Version("2.3"): + if invalid := [ + name for name in field.data or [] if not _extra_name_re.match(name) + ]: + raise wtforms.validators.ValidationError( + f"The following Provides-Extra value(s) are invalid: {invalid!r}" + ) + + def _construct_dependencies(form, types): for name, kind in types.items(): for item in getattr(form, name).data: @@ -419,7 +451,7 @@ class MetadataForm(forms.Form): # Note: This isn't really Metadata 2.0, however bdist_wheel # claims it is producing a Metadata 2.0 metadata when in # reality it's more like 1.2 with some extensions. - ["1.0", "1.1", "1.2", "2.0", "2.1"], + ["1.0", "1.1", "1.2", "2.0", "2.1", "2.2", "2.3"], message="Use a known metadata version.", ), ], @@ -470,6 +502,9 @@ class MetadataForm(forms.Form): author = wtforms.StringField( description="Author", validators=[wtforms.validators.Optional()] ) + supported_platform = wtforms.StringField( + description="Supported-Platform", validators=[wtforms.validators.Optional()] + ) description_content_type = wtforms.StringField( description="Description-Content-Type", validators=[wtforms.validators.Optional(), _validate_description_content_type], @@ -495,6 +530,10 @@ class MetadataForm(forms.Form): description="Classifier", validators=[_validate_no_deprecated_classifiers, _validate_classifiers], ) + dynamic = ListField( + description="Dynamic", + validators=[_validate_dynamic], + ) platform = wtforms.StringField( description="Platform", validators=[wtforms.validators.Optional()] ) @@ -564,6 +603,10 @@ class MetadataForm(forms.Form): description="Requires-Dist", validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list], ) + provides_extra = ListField( + description="Provides-Extra", + validators=[wtforms.validators.Optional(), _validate_provides_extras], + ) provides_dist = ListField( description="Provides-Dist", validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list], @@ -613,6 +656,15 @@ def full_validate(self): "Include at least one message digest." 
) + # Dynamic is only allowed with metadata version 2.2+ + if self.dynamic.data: + metadata_version = packaging.version.Version(self.metadata_version.data) + if metadata_version and metadata_version < packaging.version.Version("2.2"): + raise wtforms.validators.ValidationError( + "'Dynamic' is only allowed in metadata version 2.2 and higher, " + f"but you declared {self.metadata_version.data}" + ) + def _validate_filename(filename, filetype): # Our object storage does not tolerate some specific characters @@ -1107,6 +1159,8 @@ def file_upload(request): "home_page", "download_url", "requires_python", + "dynamic", + "provides_extra", } }, uploader=request.user if request.user else None, diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py --- a/warehouse/legacy/api/json.py +++ b/warehouse/legacy/api/json.py @@ -155,12 +155,16 @@ def _json_data(request, project, release, *, all_releases): "requires_dist": ( list(release.requires_dist) if release.requires_dist else None ), + "provides_extra": ( + list(release.provides_extra) if release.provides_extra else None + ), "docs_url": project.documentation_url, "bugtrack_url": None, "home_page": release.home_page, "download_url": release.download_url, "yanked": release.yanked, "yanked_reason": release.yanked_reason or None, + "dynamic": list(release.dynamic) if release.dynamic else None, }, "urls": releases[release.version], "vulnerabilities": vulnerabilities, diff --git a/warehouse/packaging/models.py b/warehouse/packaging/models.py --- a/warehouse/packaging/models.py +++ b/warehouse/packaging/models.py @@ -25,6 +25,7 @@ from sqlalchemy import ( BigInteger, CheckConstraint, + Column, FetchedValue, ForeignKey, Index, @@ -36,7 +37,7 @@ orm, sql, ) -from sqlalchemy.dialects.postgresql import CITEXT, UUID as PG_UUID +from sqlalchemy.dialects.postgresql import ARRAY, CITEXT, ENUM, UUID as PG_UUID from sqlalchemy.exc import MultipleResultsFound, NoResultFound from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.hybrid import hybrid_property @@ -465,6 +466,32 @@ class ReleaseURL(db.Model): url: Mapped[str] +DynamicFieldsEnum = ENUM( + "Platform", + "Supported-Platform", + "Summary", + "Description", + "Description-Content-Type", + "Keywords", + "Home-page", + "Download-URL", + "Author", + "Author-email", + "Maintainer", + "Maintainer-email", + "License", + "Classifier", + "Requires-Dist", + "Requires-Python", + "Requires-External", + "Project-URL", + "Provides-Extra", + "Provides-Dist", + "Obsoletes-Dist", + name="release_dynamic_fields", +) + + class Release(HasObservations, db.Model): __tablename__ = "releases" @@ -517,6 +544,12 @@ def __table_args__(cls): # noqa yanked_reason: Mapped[str] = mapped_column(server_default="") + dynamic = Column( # type: ignore[var-annotated] + ARRAY(DynamicFieldsEnum), + nullable=True, + comment="Array of metadata fields marked as Dynamic (PEP 643/Metadata 2.2)", + ) + _classifiers: Mapped[list[Classifier]] = orm.relationship( secondary="release_classifiers", order_by=Classifier.ordering, @@ -569,6 +602,12 @@ def __table_args__(cls): # noqa _provides_dist = _dependency_relation(DependencyKind.provides_dist) provides_dist = association_proxy("_provides_dist", "specifier") + provides_extra = Column( # type: ignore[var-annotated] + ARRAY(Text), + nullable=True, + comment="Array of extra names (PEP 566/685|Metadata 2.1/2.3)", + ) + _obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist) obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")
diff --git a/tests/unit/forklift/test_legacy.py b/tests/unit/forklift/test_legacy.py --- a/tests/unit/forklift/test_legacy.py +++ b/tests/unit/forklift/test_legacy.py @@ -388,6 +388,72 @@ def test_validate_classifiers_invalid(self, db_request, data): with pytest.raises(ValidationError): legacy._validate_classifiers(form, field) + @pytest.mark.parametrize( + "data", [["Requires-Dist"], ["Requires-Dist", "Requires-Python"]] + ) + def test_validate_dynamic_valid(self, db_request, data): + form = pretend.stub() + field = pretend.stub(data=data) + + legacy._validate_dynamic(form, field) + + @pytest.mark.parametrize( + "data", + [ + ["Version"], + ["Name"], + ["Version", "Name"], + ["Provides-Extra", "I-Am-Not-Metadata"], + ], + ) + def test_validate_dynamic_invalid(self, db_request, data): + form = pretend.stub() + field = pretend.stub(data=data) + + with pytest.raises(ValidationError): + legacy._validate_dynamic(form, field) + + @pytest.mark.parametrize("data", [["dev"], ["dev-test"]]) + def test_validate_provides_extras_valid(self, db_request, data): + form = pretend.stub( + provides_extra=pretend.stub(data=data), + metadata_version=pretend.stub(data="2.3"), + ) + field = pretend.stub(data=data) + + legacy._validate_provides_extras(form, field) + + @pytest.mark.parametrize("data", [["dev_test"], ["dev.lint", "dev--test"]]) + def test_validate_provides_extras_invalid(self, db_request, data): + form = pretend.stub( + provides_extra=pretend.stub(data=data), + metadata_version=pretend.stub(data="2.3"), + ) + field = pretend.stub(data=data) + + with pytest.raises(ValidationError): + legacy._validate_provides_extras(form, field) + + @pytest.mark.parametrize("data", [["dev"], ["dev-test"]]) + def test_validate_provides_extras_valid_2_2(self, db_request, data): + form = pretend.stub( + provides_extra=pretend.stub(data=data), + metadata_version=pretend.stub(data="2.2"), + ) + field = pretend.stub(data=data) + + legacy._validate_provides_extras(form, field) + + @pytest.mark.parametrize("data", [["dev_test"], ["dev.lint", "dev--test"]]) + def test_validate_provides_extras_invalid_2_2(self, db_request, data): + form = pretend.stub( + provides_extra=pretend.stub(data=data), + metadata_version=pretend.stub(data="2.2"), + ) + field = pretend.stub(data=data) + + legacy._validate_provides_extras(form, field) + def test_construct_dependencies(): types = {"requires": DependencyKind.requires, "provides": DependencyKind.provides} @@ -478,6 +544,26 @@ def test_requires_python(self): form = legacy.MetadataForm(MultiDict({"requires_python": ">= 3.5"})) form.requires_python.validate(form) + @pytest.mark.parametrize( + "data", + [ + { + "filetype": "bdist_wheel", + "metadata_version": "2.1", + "dynamic": "requires", + }, + { + "metadata_version": "1.2", + "sha256_digest": "dummy", + "dynamic": "requires", + }, + ], + ) + def test_dynamic_wrong_metadata_version(self, data): + form = legacy.MetadataForm(MultiDict(data)) + with pytest.raises(ValidationError): + form.full_validate() + class TestFileValidation: def test_defaults_to_true(self): @@ -3417,6 +3503,121 @@ def test_upload_succeeds_creates_release( ), ] + @pytest.mark.parametrize( + "version, expected_version", + [ + ("1.0", "1.0"), + ("v1.0", "1.0"), + ], + ) + def test_upload_succeeds_creates_release_metadata_2_3( + self, pyramid_config, db_request, metrics, version, expected_version + ): + user = UserFactory.create() + EmailFactory.create(user=user) + project = ProjectFactory.create() + RoleFactory.create(user=user, project=project) + + 
db_request.db.add(Classifier(classifier="Environment :: Other Environment")) + db_request.db.add(Classifier(classifier="Programming Language :: Python")) + + filename = "{}-{}.tar.gz".format(project.name, "1.0") + + pyramid_config.testing_securitypolicy(identity=user) + db_request.user = user + db_request.user_agent = "warehouse-tests/6.6.6" + db_request.POST = MultiDict( + { + "metadata_version": "2.3", + "name": project.name, + "version": version, + "summary": "This is my summary!", + "filetype": "sdist", + "md5_digest": _TAR_GZ_PKG_MD5, + "content": pretend.stub( + filename=filename, + file=io.BytesIO(_TAR_GZ_PKG_TESTDATA), + type="application/tar", + ), + "supported_platform": "i386-win32-2791", + } + ) + db_request.POST.extend( + [ + ("classifiers", "Environment :: Other Environment"), + ("classifiers", "Programming Language :: Python"), + ("requires_dist", "foo"), + ("requires_dist", "bar (>1.0)"), + ("project_urls", "Test, https://example.com/"), + ("requires_external", "Cheese (>1.0)"), + ("provides_extra", "testing"), + ("provides_extra", "plugin"), + ("dynamic", "Supported-Platform"), + ] + ) + + storage_service = pretend.stub(store=lambda path, filepath, meta: None) + db_request.find_service = lambda svc, name=None, context=None: { + IFileStorage: storage_service, + IMetricsService: metrics, + }.get(svc) + + resp = legacy.file_upload(db_request) + + assert resp.status_code == 200 + + # Ensure that a Release object has been created. + release = ( + db_request.db.query(Release) + .filter( + (Release.project == project) & (Release.version == expected_version) + ) + .one() + ) + assert release.summary == "This is my summary!" + assert release.classifiers == [ + "Environment :: Other Environment", + "Programming Language :: Python", + ] + assert set(release.requires_dist) == {"foo", "bar (>1.0)"} + assert release.project_urls == {"Test": "https://example.com/"} + assert set(release.requires_external) == {"Cheese (>1.0)"} + assert release.version == expected_version + assert release.canonical_version == "1" + assert release.uploaded_via == "warehouse-tests/6.6.6" + assert set(release.provides_extra) == {"testing", "plugin"} + assert set(release.dynamic) == {"Supported-Platform"} + + # Ensure that a File object has been created. + db_request.db.query(File).filter( + (File.release == release) & (File.filename == filename) + ).one() + + # Ensure that a Filename object has been created. 
+ db_request.db.query(Filename).filter(Filename.filename == filename).one() + + # Ensure that all of our journal entries have been created + journals = ( + db_request.db.query(JournalEntry) + .options(joinedload(JournalEntry.submitted_by)) + .order_by("submitted_date", "id") + .all() + ) + assert [(j.name, j.version, j.action, j.submitted_by) for j in journals] == [ + ( + release.project.name, + release.version, + "new release", + user, + ), + ( + release.project.name, + release.version, + f"add source file {filename}", + user, + ), + ] + def test_all_valid_classifiers_can_be_created(self, db_request): for classifier in classifiers: db_request.db.add(Classifier(classifier=classifier)) diff --git a/tests/unit/legacy/api/test_json.py b/tests/unit/legacy/api/test_json.py --- a/tests/unit/legacy/api/test_json.py +++ b/tests/unit/legacy/api/test_json.py @@ -232,6 +232,7 @@ def test_renders(self, pyramid_config, db_request, db_session): "docs_url": "/the/fake/url/", "download_url": None, "downloads": {"last_day": -1, "last_week": -1, "last_month": -1}, + "dynamic": None, "home_page": None, "keywords": None, "license": None, @@ -242,6 +243,7 @@ def test_renders(self, pyramid_config, db_request, db_session): "platform": None, "project_url": "/the/fake/url/", "project_urls": expected_urls, + "provides_extra": None, "release_url": "/the/fake/url/", "requires_dist": None, "requires_python": None, @@ -483,6 +485,8 @@ def test_detail_renders(self, pyramid_config, db_request, db_session): description=DescriptionFactory.create( content_type=description_content_type ), + dynamic=["Platform", "Supported-Platform"], + provides_extra=["testing", "plugin"], ) ] @@ -540,6 +544,7 @@ def test_detail_renders(self, pyramid_config, db_request, db_session): "docs_url": "/the/fake/url/", "download_url": None, "downloads": {"last_day": -1, "last_week": -1, "last_month": -1}, + "dynamic": ["Platform", "Supported-Platform"], "home_page": None, "keywords": None, "license": None, @@ -550,6 +555,7 @@ def test_detail_renders(self, pyramid_config, db_request, db_session): "platform": None, "project_url": "/the/fake/url/", "project_urls": expected_urls, + "provides_extra": ["testing", "plugin"], "release_url": "/the/fake/url/", "requires_dist": None, "requires_python": None, @@ -630,6 +636,7 @@ def test_minimal_renders(self, pyramid_config, db_request): "docs_url": None, "download_url": None, "downloads": {"last_day": -1, "last_week": -1, "last_month": -1}, + "dynamic": None, "home_page": None, "keywords": None, "license": None, @@ -640,6 +647,7 @@ def test_minimal_renders(self, pyramid_config, db_request): "platform": None, "project_url": "/the/fake/url/", "project_urls": None, + "provides_extra": None, "release_url": "/the/fake/url/", "requires_dist": None, "requires_python": None,
Support Metadata Version 2.2

**What's the problem this feature will solve?**
Since PEP 643 is accepted, PyPI should start accepting Metadata 2.2 distributions. See PyO3/maturin#564.

**Describe the solution you'd like**
Allow `Metadata-Version: 2.2` to pass the legacy upload endpoint’s form validation, and validate the corresponding `Dynamic` field. The latter part could be a bit tricky, since `Dynamic` is not allowed for wheels. Not sure how it could best be handled (I don’t have much experience with WTForms).

**Additional context**
https://www.python.org/dev/peps/pep-0643/

Add support for PEP 685 (Metadata Version 2.3)

**What's the problem this feature will solve?**
[PEP 685](https://peps.python.org/pep-0685/) specifies rules for normalisation of extra names in core metadata, and in turn bumps the metadata version to 2.3. Currently it is not possible to upload wheels with metadata 2.3 because warehouse rejects them as having an unsupported metadata version.

**Describe the solution you'd like**
The PEP suggests that tools consuming metadata (like warehouse) emit warnings if invalid extra names are found for *older* metadata versions, although a tool may raise an error. I assume there's no mechanism in the upload API to return a warning, only an error? I don't think an error is appropriate here, as it would likely break uploads of packages with previously valid metadata. Tools should also warn (but may error) for invalid extras with metadata version 2.3. Therefore I propose leaving the validation unchanged for older versions, but returning an error if the metadata version is 2.3 (or later) and an extra name is invalid.

**Additional context**
I have an initial implementation here: https://github.com/pypa/warehouse/pull/11380/commits/48b5063a65a382d3bba622351858aea9c058f6c0

Add support for metadata 2.2 and 2.3

Fixes #9660 and #11526

I couldn't find any existing tests for the `Metadata-Version` validation, so I haven't added any yet. There are tests for the new validation functions for `Dynamic` and `Provides-Extra`, though.
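The merged patch above implements exactly this strict behaviour for `Provides-Extra`. The regular expression below is copied from that diff; the surrounding scaffolding is only an illustrative sketch of how it rejects non-normalised extra names under PEP 685:

```python
import re

# Regex taken from the patch above: a normalised extra name is lowercase
# ASCII letters/digits and single hyphens, with no leading/trailing hyphen
# and no "--" runs.
_extra_name_re = re.compile("^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")


def invalid_extras(names: list[str]) -> list[str]:
    """Return the Provides-Extra values that fail PEP 685 normalisation."""
    return [name for name in names if not _extra_name_re.match(name)]


# Same valid/invalid cases as the unit tests in the diff above.
assert invalid_extras(["dev", "dev-test"]) == []
assert invalid_extras(["dev_test", "dev.lint", "dev--test"]) == [
    "dev_test", "dev.lint", "dev--test"
]
```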
There are a couple of levels of 'validation' that could happen for the `Dynamic` field here:

1. Validate that `Name` and `Version` are not marked as `Dynamic` (feasible)
2. Validate that wheels do not set `Dynamic` (probably feasible)
3. Validation across multiple artifacts (probably infeasible):
   - If a field is not marked as `Dynamic`, then the value of the field in any wheel built from the sdist MUST match the value in the sdist. If the field is not in the sdist, and not marked as `Dynamic`, then it MUST NOT be present in the wheel.
   - If a field is marked as `Dynamic`, it may contain any valid value in a wheel built from the sdist (including not being present at all).
   - Backends MUST NOT mark a field as `Dynamic` if they can determine that it was generated from data that will not change at build time.

Are we OK with only doing 1 & 2? (cc @pfmoore)

From my perspective, just allowing 2.2 when no `Dynamic` is used would already be great and would solve https://github.com/PyO3/maturin/pull/564

I think (1) is reasonable. The PEP allows wheels to set `Dynamic`, so I think (2) is wrong and should be omitted. I'm happy that Warehouse shouldn't try to do (3).

> The PEP allows wheels to set Dynamic, so I think (2) is wrong, and should be omitted.

Got it. I was taking this from the original issue and thought I was missing something in the PEP. I'll update the OP accordingly.

If all we need to do is (1), then this is straightforward.

What's the next step for this? Is it just waiting on a PR?

Yes.

Any update on this?

@domdfcoding gentle ping, are you still interested in working on this?

I'm trying to revive this in #13606
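For reference, the level-1 check that the PR above eventually ships (as `_validate_dynamic`) boils down to the set arithmetic below. This standalone distillation uses a plain `ValueError` in place of the WTForms validator and inlines the allowed names from `DynamicFieldsEnum`; those substitutions are illustrative only:

```python
# Standalone distillation of `_validate_dynamic` from the patch above;
# error type and constants are stand-ins for the WTForms/SQLAlchemy pieces.
ALLOWED_DYNAMIC_FIELDS = {
    "Platform", "Supported-Platform", "Summary", "Description",
    "Description-Content-Type", "Keywords", "Home-page", "Download-URL",
    "Author", "Author-email", "Maintainer", "Maintainer-email", "License",
    "Classifier", "Requires-Dist", "Requires-Python", "Requires-External",
    "Project-URL", "Provides-Extra", "Provides-Dist", "Obsoletes-Dist",
}
DISALLOWED_DYNAMIC_FIELDS = {"Name", "Version", "Metadata-Version"}


def validate_dynamic(declared: list[str]) -> None:
    # Normalize case the same way the patch does
    # ("requires-dist" -> "Requires-Dist").
    fields = {str.title(f) for f in declared}
    if invalid := (fields & DISALLOWED_DYNAMIC_FIELDS):
        raise ValueError(f"Valid metadata, but cannot be dynamic: {invalid!r}")
    if invalid := (fields - ALLOWED_DYNAMIC_FIELDS):
        raise ValueError(f"Not valid metadata fields: {invalid!r}")
```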
2023-05-08T14:17:09Z
[]
[]
pypi/warehouse
13647
pypi__warehouse-13647
[ "12412" ]
04aa8003d4f18a76e05ee61ad5afd15928df1ccc
diff --git a/warehouse/admin/routes.py b/warehouse/admin/routes.py --- a/warehouse/admin/routes.py +++ b/warehouse/admin/routes.py @@ -198,29 +198,6 @@ def includeme(config): config.add_route("admin.flags", "/admin/flags/", domain=warehouse) config.add_route("admin.flags.edit", "/admin/flags/edit/", domain=warehouse) - # Malware checks - config.add_route("admin.checks.list", "/admin/checks/", domain=warehouse) - config.add_route( - "admin.checks.detail", "/admin/checks/{check_name}", domain=warehouse - ) - config.add_route( - "admin.checks.change_state", - "/admin/checks/{check_name}/change_state", - domain=warehouse, - ) - config.add_route( - "admin.checks.run_evaluation", - "/admin/checks/{check_name}/run_evaluation", - domain=warehouse, - ) - config.add_route("admin.verdicts.list", "/admin/verdicts/", domain=warehouse) - config.add_route( - "admin.verdicts.detail", "/admin/verdicts/{verdict_id}", domain=warehouse - ) - config.add_route( - "admin.verdicts.review", "/admin/verdicts/{verdict_id}/review", domain=warehouse - ) - # Sponsor related Admin pages config.add_route("admin.sponsor.list", "/admin/sponsors/", domain=warehouse) config.add_route( diff --git a/warehouse/admin/views/checks.py b/warehouse/admin/views/checks.py deleted file mode 100644 --- a/warehouse/admin/views/checks.py +++ /dev/null @@ -1,149 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther -from pyramid.view import view_config -from sqlalchemy.exc import NoResultFound - -from warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareCheckType -from warehouse.malware.tasks import backfill, remove_verdicts, run_scheduled_check - -EVALUATION_RUN_SIZE = 10000 - - -@view_config( - route_name="admin.checks.list", - renderer="admin/malware/checks/index.html", - permission="moderator", - request_method="GET", - uses_session=True, -) -def get_checks(request): - all_checks = request.db.query(MalwareCheck) - active_checks = [] - for check in all_checks: - if not check.is_stale: - active_checks.append(check) - - active_checks.sort(key=lambda check: check.created, reverse=True) - - return {"checks": active_checks} - - -@view_config( - route_name="admin.checks.detail", - renderer="admin/malware/checks/detail.html", - permission="moderator", - request_method="GET", - uses_session=True, -) -def get_check(request): - check = get_check_by_name(request.db, request.matchdict["check_name"]) - - all_checks = ( - request.db.query(MalwareCheck) - .filter(MalwareCheck.name == request.matchdict["check_name"]) - .order_by(MalwareCheck.version.desc()) - .all() - ) - - return { - "check": check, - "checks": all_checks, - "states": MalwareCheckState, - "evaluation_run_size": EVALUATION_RUN_SIZE, - } - - -@view_config( - route_name="admin.checks.run_evaluation", - permission="admin", - request_method="POST", - uses_session=True, - require_methods=False, - require_csrf=True, -) -def run_evaluation(request): - check = get_check_by_name(request.db, request.matchdict["check_name"]) - - if check.state not in (MalwareCheckState.Enabled, MalwareCheckState.Evaluation): - request.session.flash( - "Check must be in 'enabled' or 'evaluation' state to manually execute.", - queue="error", - ) - return HTTPSeeOther( - request.route_path("admin.checks.detail", check_name=check.name) - ) - - if check.check_type == MalwareCheckType.EventHook: - request.session.flash( - f"Running {check.name} on {EVALUATION_RUN_SIZE} " - f"{check.hooked_object.value}s!", - queue="success", - ) - request.task(backfill).delay(check.name, EVALUATION_RUN_SIZE) - - else: - request.session.flash(f"Running {check.name} now!", queue="success") - request.task(run_scheduled_check).delay(check.name, manually_triggered=True) - - return HTTPSeeOther( - request.route_path("admin.checks.detail", check_name=check.name) - ) - - -@view_config( - route_name="admin.checks.change_state", - permission="admin", - request_method="POST", - uses_session=True, - require_methods=False, - require_csrf=True, -) -def change_check_state(request): - check = get_check_by_name(request.db, request.matchdict["check_name"]) - - try: - check_state = request.POST["check_state"] - except KeyError: - raise HTTPNotFound - - try: - check.state = MalwareCheckState(check_state) - except ValueError: - request.session.flash("Invalid check state provided.", queue="error") - else: - if check.state == MalwareCheckState.WipedOut: - request.task(remove_verdicts).delay(check.name) - request.session.flash( - f"Changed {check.name!r} check to {check.state.value!r}!", queue="success" - ) - finally: - return HTTPSeeOther( - request.route_path("admin.checks.detail", check_name=check.name) - ) - - -def get_check_by_name(db, check_name): - try: - # Throw an exception if and only if no results are returned. 
- newest = ( - db.query(MalwareCheck) - .filter(MalwareCheck.name == check_name) - .order_by(MalwareCheck.version.desc()) - .limit(1) - .one() - ) - except NoResultFound: - raise HTTPNotFound - - return newest diff --git a/warehouse/admin/views/verdicts.py b/warehouse/admin/views/verdicts.py deleted file mode 100644 --- a/warehouse/admin/views/verdicts.py +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paginate_sqlalchemy import SqlalchemyOrmPage as SQLAlchemyORMPage -from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPSeeOther -from pyramid.view import view_config - -from warehouse.malware.models import ( - MalwareCheck, - MalwareVerdict, - VerdictClassification, - VerdictConfidence, -) -from warehouse.utils.paginate import paginate_url_factory - - -@view_config( - route_name="admin.verdicts.list", - renderer="admin/malware/verdicts/index.html", - permission="moderator", - request_method="GET", - uses_session=True, -) -def get_verdicts(request): - result = {} - result["check_names"] = {name for (name,) in request.db.query(MalwareCheck.name)} - result["classifications"] = {c.value for c in VerdictClassification} - result["confidences"] = {c.value for c in VerdictConfidence} - - validate_fields(request, result) - - result["verdicts"] = SQLAlchemyORMPage( - generate_query(request.db, request.params), - page=int(request.params.get("page", 1)), - items_per_page=25, - url_maker=paginate_url_factory(request), - ) - - return result - - -@view_config( - route_name="admin.verdicts.detail", - renderer="admin/malware/verdicts/detail.html", - permission="moderator", - request_method="GET", - uses_session=True, -) -def get_verdict(request): - verdict = request.db.get(MalwareVerdict, request.matchdict["verdict_id"]) - - if verdict: - return { - "verdict": verdict, - "classifications": list(VerdictClassification.__members__.keys()), - } - - raise HTTPNotFound - - -@view_config( - route_name="admin.verdicts.review", - permission="moderator", - request_method="POST", - uses_session=True, - require_methods=False, - require_csrf=True, -) -def review_verdict(request): - verdict = request.db.get(MalwareVerdict, request.matchdict["verdict_id"]) - - try: - classification = getattr(VerdictClassification, request.POST["classification"]) - except (KeyError, AttributeError): - raise HTTPBadRequest("Invalid verdict classification.") from None - - verdict.manually_reviewed = True - verdict.reviewer_verdict = classification - - request.session.flash( - "Verdict %s marked as reviewed." % verdict.id, queue="success" - ) - - # If no query params are provided (e.g. 
request originating from - # admins.verdicts.detail view), then route to the default list view - query = request.GET or {"classification": "threat", "manually_reviewed": "0"} - return HTTPSeeOther(request.route_path("admin.verdicts.list", _query=query)) - - -def validate_fields(request, validators): - try: - int(request.params.get("page", 1)) - except ValueError: - raise HTTPBadRequest("'page' must be an integer.") from None - - validators = {**validators, **{"manually_revieweds": {"0", "1"}}} - - for key, possible_values in validators.items(): - # Remove the trailing 's' - value = request.params.get(key[:-1]) - additional_values = {None, ""} - if value not in possible_values | additional_values: - raise HTTPBadRequest(f"Invalid value for '{key[:-1]}': {value}.") from None - - -def generate_query(db, params): - """ - Returns an SQLAlchemy query wth request params applied as filters. - """ - query = db.query(MalwareVerdict) - if params.get("check_name"): - query = query.join(MalwareCheck) - query = query.filter(MalwareCheck.name == params["check_name"]) - if params.get("confidence"): - query = query.filter(MalwareVerdict.confidence == params["confidence"]) - if params.get("classification"): - query = query.filter(MalwareVerdict.classification == params["classification"]) - if params.get("manually_reviewed"): - query = query.filter( - MalwareVerdict.manually_reviewed == bool(int(params["manually_reviewed"])) - ) - - return query.order_by(MalwareVerdict.run_date.desc()) diff --git a/warehouse/cli/malware.py b/warehouse/cli/malware.py deleted file mode 100644 --- a/warehouse/cli/malware.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import click - -from warehouse.cli import warehouse -from warehouse.malware.tasks import sync_checks as _sync_checks - - [email protected]() # pragma: no branch -def malware(): - """ - Manage the Warehouse Malware Checks. - """ - - [email protected]() [email protected]_obj -def sync_checks(config): - """ - Sync the Warehouse database with the malware checks in malware/checks. 
- """ - - request = config.task(_sync_checks).get_request() - config.task(_sync_checks).run(request) diff --git a/warehouse/config.py b/warehouse/config.py --- a/warehouse/config.py +++ b/warehouse/config.py @@ -248,7 +248,6 @@ def configure(settings=None): maybe_set_compound(settings, "mail", "backend", "MAIL_BACKEND") maybe_set_compound(settings, "metrics", "backend", "METRICS_BACKEND") maybe_set_compound(settings, "breached_passwords", "backend", "BREACHED_PASSWORDS") - maybe_set_compound(settings, "malware_check", "backend", "MALWARE_CHECK_BACKEND") maybe_set( settings, "oidc.backend", @@ -597,9 +596,6 @@ def configure(settings=None): # Register support for OIDC based authentication config.include(".oidc") - # Register support for malware checks - config.include(".malware") - # Register logged-in views config.include(".manage") diff --git a/warehouse/malware/__init__.py b/warehouse/malware/__init__.py deleted file mode 100644 --- a/warehouse/malware/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect - -from celery.schedules import crontab - -import warehouse.malware.checks as checks - -from warehouse import db -from warehouse.malware.interfaces import IMalwareCheckService -from warehouse.malware.models import MalwareCheckObjectType -from warehouse.malware.tasks import run_scheduled_check -from warehouse.malware.utils import get_enabled_hooked_checks - - [email protected]_for(db.Session, "after_flush") -def determine_malware_checks(config, session, flush_context): - if not session.new: - return - - if not any( - [ - obj.__class__.__name__ - for obj in session.new - if obj.__class__.__name__ in MalwareCheckObjectType.__members__ - ] - ): - return - - malware_checks = session.info.setdefault("warehouse.malware.checks", set()) - enabled_checks = get_enabled_hooked_checks(session) - for obj in session.new: - for check_name in enabled_checks.get(obj.__class__.__name__, []): - malware_checks.update([f"{check_name}:{obj.id}"]) - - [email protected]_for(db.Session, "after_commit") -def queue_malware_checks(config, session): - malware_checks = session.info.pop("warehouse.malware.checks", set()) - if not malware_checks: - return - - malware_check_factory = config.find_service_factory(IMalwareCheckService) - - malware_check = malware_check_factory(None, config) - malware_check.run_checks(malware_checks) - - -def includeme(config): - malware_check_class = config.maybe_dotted( - config.registry.settings["malware_check.backend"] - ) - # Register the malware check service - config.register_service_factory( - malware_check_class.create_service, IMalwareCheckService - ) - - # Add scheduled tasks for every scheduled MalwareCheck. 
- all_checks = inspect.getmembers(checks, inspect.isclass) - for check_obj in all_checks: - check = check_obj[1] - if check.check_type == "scheduled": - config.add_periodic_task( - crontab(**check.schedule), run_scheduled_check, args=(check_obj[0],) - ) diff --git a/warehouse/malware/checks/__init__.py b/warehouse/malware/checks/__init__.py deleted file mode 100644 --- a/warehouse/malware/checks/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .package_turnover import PackageTurnoverCheck # noqa -from .setup_patterns import SetupPatternCheck # noqa diff --git a/warehouse/malware/checks/base.py b/warehouse/malware/checks/base.py deleted file mode 100644 --- a/warehouse/malware/checks/base.py +++ /dev/null @@ -1,71 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareVerdict -from warehouse.packaging import models - - -class MalwareCheckBase: - def __init__(self, db): - self.db = db - self._name = self.__class__.__name__ - self._load_check_fields() - self._verdicts = [] - - @classmethod - def prepare(cls, request, obj_id): - """ - Prepare some context for scanning the given object. - """ - kwargs = {"obj_id": obj_id} - - model = getattr(models, cls.hooked_object) - kwargs["obj"] = request.db.get(model, obj_id) - - if cls.hooked_object == "File": - kwargs["file_url"] = request.route_url( - "packaging.file", path=kwargs["obj"].path - ) - - return kwargs - - def add_verdict(self, **kwargs): - """ - Save a new verdict in the database. Subclasses should call this method - explicitly. - """ - self._verdicts.append(MalwareVerdict(check_id=self.id, **kwargs)) - - def run(self, **kwargs): - """ - Run the check and insert returned verdicts. - """ - self.scan(**kwargs) - self.db.add_all(self._verdicts) - - def scan(self, **kwargs): - """ - Scan the object and return a verdict. Subclasses should implement - this method. 
- """ - - def _load_check_fields(self): - self.id, self.state = ( - self.db.query(MalwareCheck.id, MalwareCheck.state) - .filter(MalwareCheck.name == self._name) - .filter( - MalwareCheck.state.in_( - [MalwareCheckState.Enabled, MalwareCheckState.Evaluation] - ) - ) - .one() - ) diff --git a/warehouse/malware/checks/package_turnover/__init__.py b/warehouse/malware/checks/package_turnover/__init__.py deleted file mode 100644 --- a/warehouse/malware/checks/package_turnover/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .check import PackageTurnoverCheck # noqa diff --git a/warehouse/malware/checks/package_turnover/check.py b/warehouse/malware/checks/package_turnover/check.py deleted file mode 100644 --- a/warehouse/malware/checks/package_turnover/check.py +++ /dev/null @@ -1,116 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from datetime import datetime, timedelta -from textwrap import dedent - -from sqlalchemy import select - -from warehouse.accounts.models import User -from warehouse.events.tags import EventTag -from warehouse.malware.checks.base import MalwareCheckBase -from warehouse.malware.models import ( - MalwareVerdict, - VerdictClassification, - VerdictConfidence, -) -from warehouse.packaging.models import Project, Release - - -class PackageTurnoverCheck(MalwareCheckBase): - version = 1 - short_description = "A check for unusual changes in package ownership" - long_description = dedent( - """ - This check looks at recently uploaded releases and determines - whether their owners have recently changed or decreased the security - of their accounts (e.g., by disabling 2FA). 
- """ - ) - check_type = "scheduled" - schedule = {"minute": 0, "hour": 0} - - def __init__(self, db): - super().__init__(db) - self._scan_interval = datetime.utcnow() - timedelta(hours=24) - - def user_posture_verdicts(self, project): - for user in project.users: - has_removed_2fa_method = self.db.query( - self.db.query(User.Event) - .filter(User.Event.source_id == user.id) - .filter(User.Event.time >= self._scan_interval) - .filter(User.Event.tag == EventTag.Account.TwoFactorMethodRemoved) - .exists() - ).scalar() - - if has_removed_2fa_method and not user.has_two_factor: - self.add_verdict( - project_id=project.id, - classification=VerdictClassification.Threat, - confidence=VerdictConfidence.High, - message="User with control over this package has disabled 2FA", - ) - - def user_turnover_verdicts(self, project): - # NOTE: This could probably be more involved to check for the case - # where someone adds themself, removes the real maintainers, pushes a malicious - # release, then reverts the ownership to the original maintainers and removes - # themself again. - recent_role_adds = ( - self.db.query(Project.Event.additional) - .filter(Project.Event.source_id == project.id) - .filter(Project.Event.time >= self._scan_interval) - .filter( - (Project.Event.tag == EventTag.Project.RoleAdd) - | (Project.Event.tag == "project:role:accepted") - ) - .all() - ) - - added_users = {role_add["target_user"] for role_add, in recent_role_adds} - current_users = {user.username for user in project.users} - - if added_users == current_users: - self.add_verdict( - project_id=project.id, - classification=VerdictClassification.Threat, - confidence=VerdictConfidence.High, - message="Suspicious user turnover; all current maintainers are new", - ) - - def scan(self, **kwargs): - prior_verdicts = select(MalwareVerdict.release_id).where( - MalwareVerdict.check_id == self.id - ) - - releases = ( - self.db.query(Release) - .filter(Release.created >= self._scan_interval) - .filter(~Release.id.in_(prior_verdicts)) - .all() - ) - - visited_project_ids = set() - for release in releases: - # Skip projects for which this is the first release, - # since we need a baseline to compare against - if len(release.project.releases) < 2: - continue - - if release.project.id in visited_project_ids: - continue - - visited_project_ids.add(release.project.id) - - self.user_posture_verdicts(release.project) - self.user_turnover_verdicts(release.project) diff --git a/warehouse/malware/checks/setup_patterns/__init__.py b/warehouse/malware/checks/setup_patterns/__init__.py deleted file mode 100644 --- a/warehouse/malware/checks/setup_patterns/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from .check import SetupPatternCheck # noqa diff --git a/warehouse/malware/checks/setup_patterns/check.py b/warehouse/malware/checks/setup_patterns/check.py deleted file mode 100644 --- a/warehouse/malware/checks/setup_patterns/check.py +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from textwrap import dedent - -import yara - -from warehouse.malware.checks.base import MalwareCheckBase -from warehouse.malware.checks.utils import extract_file_content, fetch_url_content -from warehouse.malware.errors import FatalCheckError -from warehouse.malware.models import VerdictClassification, VerdictConfidence - - -class SetupPatternCheck(MalwareCheckBase): - _yara_rule_file = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "setup_py_rules.yara" - ) - - version = 3 - short_description = "A check for common malicious patterns in setup.py" - long_description = dedent( - """ - This check uses YARA to search for common malicious patterns in the setup.py - files of uploaded release archives. - """ - ) - check_type = "event_hook" - hooked_object = "File" - - def __init__(self, db): - super().__init__(db) - self._yara_rules = self._load_yara_rules() - - def _load_yara_rules(self): - return yara.compile(filepath=self._yara_rule_file) - - def scan(self, **kwargs): - release_file = kwargs.get("obj") - file_url = kwargs.get("file_url") - if release_file is None or file_url is None: - raise FatalCheckError( - "Release file or file url is None, indicating user error." - ) - - if release_file.packagetype != "sdist": - # Per PEP 491: bdists do not contain setup.py. - # This check only scans dists that contain setup.py, so - # we have nothing to perform. - return - - archive_stream = fetch_url_content(file_url) - setup_py_contents = extract_file_content(archive_stream, "setup.py") - if setup_py_contents is None: - self.add_verdict( - file_id=release_file.id, - classification=VerdictClassification.Indeterminate, - confidence=VerdictConfidence.High, - message="sdist does not contain a suitable setup.py for analysis", - ) - return - - matches = self._yara_rules.match(data=setup_py_contents) - if len(matches) > 0: - # We reduce N matches into a single verdict by taking the maximum - # classification and confidence. - classification = max( - VerdictClassification(m.meta["classification"]) for m in matches - ) - confidence = max(VerdictConfidence(m.meta["confidence"]) for m in matches) - message = ":".join(m.rule for m in matches) - - details = {} - for match in matches: - details[match.rule] = { - "classification": match.meta["classification"], - "confidence": match.meta["confidence"], - # NOTE: We could include the raw bytes here (s[2]), - # but we'd have to serialize/encode it to make JSON happy. - # It probably suffices to include the offset and identifier - # for triage purposes. 
- "strings": [[s[0], s[1]] for s in match.strings], - } - - self.add_verdict( - file_id=release_file.id, - classification=classification, - confidence=confidence, - message=message, - details=details, - ) - else: - # No matches? Report a low-confidence benign verdict. - self.add_verdict( - file_id=release_file.id, - classification=VerdictClassification.Benign, - confidence=VerdictConfidence.Low, - message="No malicious patterns found in setup.py", - ) diff --git a/warehouse/malware/checks/utils.py b/warehouse/malware/checks/utils.py deleted file mode 100644 --- a/warehouse/malware/checks/utils.py +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import pathlib -import tarfile -import zipfile - -import requests - - -def fetch_url_content(url): - """ - Retrieves the contents of the given (presumed CDN) URL as a BytesIO. - - Performs no error checking; exceptions are handled in the check harness - as part of check retrying behavior. - """ - response = requests.get(url) - response.raise_for_status() - return io.BytesIO(response.content) - - -def extract_file_content(archive_stream, file_path): - """ - Retrieves the content of the given path from the given archive stream - (presumed to be a dist) as bytes. - - Handling of the given path is a little special: since the dist format(s) - don't enforce any naming convention for the base archive directory, - the path is interpreted as {base}/{file_path}. Thus, a call like this: - - extract_file_content(stream, "setup.py") - - will extract and return the contents of {base}/setup.py where {base} - is frequently (but not guaranteed to be) something like $name-$version. - - Returns None on any sort of failure. - """ - if zipfile.is_zipfile(archive_stream): - with zipfile.ZipFile(archive_stream) as zipobj: - for name in zipobj.namelist(): - path_parts = pathlib.Path(name).parts - if len(path_parts) >= 2: - tail = pathlib.Path(*path_parts[1:]) - if str(tail) == file_path: - return zipobj.read(name) - return None - else: - # NOTE: is_zipfile doesn't rewind the fileobj it's given. - archive_stream.seek(0) - - # NOTE: We don't need to perform a sanity check on - # the (presumed) tarfile's compression here, since we're - # extracting from a stream that's already gone through - # upload validation. - # See _is_valid_dist_file in forklift/legacy.py. - try: - with tarfile.open(fileobj=archive_stream) as tarobj: - member = tarobj.next() - while member: - path_parts = pathlib.Path(member.name).parts - if len(path_parts) >= 2: - tail = pathlib.Path(*path_parts[1:]) - if str(tail) == file_path: - return tarobj.extractfile(member).read() - - member = tarobj.next() - return None - except tarfile.TarError: - return None diff --git a/warehouse/malware/errors.py b/warehouse/malware/errors.py deleted file mode 100644 --- a/warehouse/malware/errors.py +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class FatalCheckError(Exception): - pass diff --git a/warehouse/malware/interfaces.py b/warehouse/malware/interfaces.py deleted file mode 100644 --- a/warehouse/malware/interfaces.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from zope.interface import Interface - - -class IMalwareCheckService(Interface): - def create_service(context, request): - """ - Create the service, given the context and request for which it is being - created for. - """ - - def run_checks(checks, **kwargs): - """ - Run a given set of checks. - """ diff --git a/warehouse/malware/models.py b/warehouse/malware/models.py deleted file mode 100644 --- a/warehouse/malware/models.py +++ /dev/null @@ -1,172 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum -import functools - -from citext import CIText -from sqlalchemy import ( - Boolean, - Column, - DateTime, - Enum, - ForeignKey, - Integer, - String, - Text, - UniqueConstraint, - orm, - sql, -) -from sqlalchemy.dialects.postgresql import JSONB - -from warehouse import db -from warehouse.utils.attrs import make_repr - - [email protected] -class MalwareCheckType(enum.Enum): - EventHook = "event_hook" - Scheduled = "scheduled" - - [email protected] -class MalwareCheckState(enum.Enum): - Enabled = "enabled" - Evaluation = "evaluation" - Disabled = "disabled" - WipedOut = "wiped_out" - - [email protected] -class MalwareCheckObjectType(enum.Enum): - File = "File" - Release = "Release" - Project = "Project" - - [email protected] [email protected]_ordering -class VerdictClassification(enum.Enum): - """ - An enumeration of classification markers for malware verdicts. - - Note that the order of declaration is important: it provides - the appropriate ordering behavior when finding the minimum - and maximum classifications for a set of verdicts. 
- """ - - Benign = "benign" - Indeterminate = "indeterminate" - Threat = "threat" - - def __lt__(self, other): - members = list(self.__class__) - return members.index(self) < members.index(other) - - [email protected] [email protected]_ordering -class VerdictConfidence(enum.Enum): - """ - An enumeration of confidence markers for malware verdicts. - - Note that the order of declaration is important: it provides - the appropriate ordering behavior when finding the minimum - and maximum confidences for a set of verdicts. - """ - - Low = "low" - Medium = "medium" - High = "high" - - def __lt__(self, other): - members = list(self.__class__) - return members.index(self) < members.index(other) - - -class MalwareCheck(db.Model): - __tablename__ = "malware_checks" - __table_args__ = (UniqueConstraint("name", "version"),) - __repr__ = make_repr("name", "version") - - name = Column(CIText, nullable=False) - version = Column(Integer, default=1, nullable=False) - short_description = Column(String(length=128), nullable=False) - long_description = Column(Text, nullable=False) - check_type = Column( - Enum(MalwareCheckType, values_callable=lambda x: [e.value for e in x]), - nullable=False, - ) - # The object name that hooked-based checks operate on, e.g. - # Project, File, Release - hooked_object = Column( - Enum(MalwareCheckObjectType, values_callable=lambda x: [e.value for e in x]), - nullable=True, - ) - # The run schedule for schedule-based checks. - schedule = Column(JSONB, nullable=True) - state = Column( - Enum(MalwareCheckState, values_callable=lambda x: [e.value for e in x]), - nullable=False, - server_default=("disabled"), - ) - created = Column(DateTime, nullable=False, server_default=sql.func.now()) - - @property - def is_stale(self): - session = orm.object_session(self) - newest = ( - session.query(MalwareCheck) - .filter(MalwareCheck.name == self.name) - .order_by(MalwareCheck.version.desc()) - .first() - ) - return self.version != newest.version - - -class MalwareVerdict(db.Model): - __tablename__ = "malware_verdicts" - - run_date = Column(DateTime, nullable=False, server_default=sql.func.now()) - check_id = Column( - ForeignKey("malware_checks.id", onupdate="CASCADE", ondelete="CASCADE"), - nullable=False, - index=True, - ) - # TODO: When GH-4440 is resolved, we should remove these CASCADEs to ensure that an - # auditable history of malware check verdicts remain in the event that a - # Project, Release, or File is removed by it's maintainers. 
- file_id = Column(ForeignKey("release_files.id", ondelete="CASCADE"), nullable=True) - release_id = Column(ForeignKey("releases.id", ondelete="CASCADE"), nullable=True) - project_id = Column(ForeignKey("projects.id", ondelete="CASCADE"), nullable=True) - classification = Column( - Enum(VerdictClassification, values_callable=lambda x: [e.value for e in x]), - nullable=False, - ) - confidence = Column( - Enum(VerdictConfidence, values_callable=lambda x: [e.value for e in x]), - nullable=False, - ) - message = Column(Text, nullable=True) - details = Column(JSONB, nullable=True) - manually_reviewed = Column(Boolean, nullable=False, server_default=sql.false()) - reviewer_verdict = Column( - Enum(VerdictClassification, values_callable=lambda x: [e.value for e in x]), - nullable=True, - ) - full_report_link = Column(String, nullable=True) - - check = orm.relationship("MalwareCheck", foreign_keys=[check_id], lazy=True) - release_file = orm.relationship("File", foreign_keys=[file_id], lazy=True) - release = orm.relationship("Release", foreign_keys=[release_id], lazy=True) - project = orm.relationship("Project", foreign_keys=[project_id], lazy=True) diff --git a/warehouse/malware/services.py b/warehouse/malware/services.py deleted file mode 100644 --- a/warehouse/malware/services.py +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from zope.interface import implementer - -from warehouse.malware.interfaces import IMalwareCheckService -from warehouse.malware.tasks import run_check - - -@implementer(IMalwareCheckService) -class PrinterMalwareCheckService: - def __init__(self, executor): - self.executor = executor - - @classmethod - def create_service(cls, context, request): - return cls(print) - - def run_checks(self, checks, **kwargs): - for check in checks: - self.executor(check, kwargs) - - -@implementer(IMalwareCheckService) -class DatabaseMalwareCheckService: - def __init__(self, executor): - self.executor = executor - - @classmethod - def create_service(cls, context, request): - return cls(request.task(run_check).delay) - - def run_checks(self, checks, **kwargs): - for check_info in checks: - # Hooked checks - if ":" in check_info: - check_name, obj_id = check_info.split(":") - kwargs["obj_id"] = obj_id - # Scheduled checks - else: - check_name = check_info - self.executor(check_name, **kwargs) diff --git a/warehouse/malware/tasks.py b/warehouse/malware/tasks.py deleted file mode 100644 --- a/warehouse/malware/tasks.py +++ /dev/null @@ -1,166 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect - -from sqlalchemy.exc import NoResultFound - -import warehouse.malware.checks as checks -import warehouse.packaging.models as packaging_models - -from warehouse.malware.errors import FatalCheckError -from warehouse.malware.interfaces import IMalwareCheckService -from warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareVerdict -from warehouse.malware.utils import get_check_fields -from warehouse.tasks import task - - -@task(bind=True, ignore_result=True, acks_late=True, retry_backoff=True) -def run_check(task, request, check_name, obj_id=None, manually_triggered=False): - try: - check = getattr(checks, check_name)(request.db) - except NoResultFound: - request.log.info("Check %s isn't active. Aborting." % check_name) - return - - # Don't run scheduled checks if they are in evaluation mode, unless manually - # triggered. - if check.state == MalwareCheckState.Evaluation and not manually_triggered: - request.log.info( - "%s is in the `evaluation` state and must be manually triggered to run." - % check_name - ) - return - - kwargs = {} - - # Hooked checks require `obj_id`s. - if obj_id is not None: - kwargs = check.prepare(request, obj_id) - - try: - check.run(**kwargs) - except FatalCheckError as exc: - request.log.error(f"Fatal exception: {check_name}: {str(exc)}") - return - except Exception as exc: - request.log.error(f"Error executing check {check_name}: {str(exc)}") - raise task.retry(exc=exc) - - -@task(bind=True, ignore_result=True, acks_late=True) -def run_scheduled_check(task, request, check_name, manually_triggered=False): - malware_check_service = request.find_service_factory(IMalwareCheckService) - malware_check = malware_check_service(None, request) - malware_check.run_checks([check_name], manually_triggered=manually_triggered) - - -@task(bind=True, ignore_result=True, acks_late=True) -def backfill(task, request, check_name, num_objects): - """ - Runs a backfill on a fixed number of objects. - """ - check = getattr(checks, check_name)(request.db) - target_object = getattr(packaging_models, check.hooked_object) - query = request.db.query(target_object.id).limit(num_objects) - - request.log.info("Running backfill on %d %ss." % (num_objects, check.hooked_object)) - - runs = set() - for (elem_id,) in query: - runs.update([f"{check_name}:{elem_id}"]) - - malware_check_service = request.find_service_factory(IMalwareCheckService) - malware_check = malware_check_service(None, request) - malware_check.run_checks(runs, manually_triggered=True) - - -@task(bind=True, ignore_result=True, acks_late=True) -def sync_checks(task, request): - code_checks = inspect.getmembers(checks, inspect.isclass) - request.log.info("%d malware checks found in codebase." % len(code_checks)) - - all_checks = request.db.query(MalwareCheck).all() - active_checks = {} - wiped_out_checks = {} - for check in all_checks: - if not check.is_stale: - if check.state == MalwareCheckState.WipedOut: - wiped_out_checks[check.name] = check - else: - active_checks[check.name] = check - - if len(active_checks) > len(code_checks): - code_check_names = {name for name, cls in code_checks} - missing = ", ".join(set(active_checks.keys()) - code_check_names) - request.log.error( - "Found %d active checks in the db, but only %d checks in \ -code. 
Please manually move superfluous checks to the wiped_out state \ -in the check admin: %s" - % (len(active_checks), len(code_checks), missing) - ) - raise Exception("Mismatch between number of db checks and code checks.") - - for check_name, check_class in code_checks: - check = getattr(checks, check_name) - - if wiped_out_checks.get(check_name): - request.log.error( - "%s is wiped_out and cannot be synced. Please remove check from \ -codebase." - % check_name - ) - continue - - db_check = active_checks.get(check_name) - if db_check: - if check.version == db_check.version: - request.log.info("%s is unmodified." % check_name) - continue - - request.log.info("Updating existing %s." % check_name) - fields = get_check_fields(check) - - # Migrate the check state to the newest check. - # Then mark the old check state as disabled. - if db_check.state != MalwareCheckState.Disabled: - fields["state"] = db_check.state.value - db_check.state = MalwareCheckState.Disabled - - request.db.add(MalwareCheck(**fields)) - else: - request.log.info("Adding new %s to the database." % check_name) - fields = get_check_fields(check) - request.db.add(MalwareCheck(**fields)) - - -@task(bind=True, ignore_result=True, acks_late=True) -def remove_verdicts(task, request, check_name): - check_ids = ( - request.db.query(MalwareCheck.id, MalwareCheck.version) - .filter(MalwareCheck.name == check_name) - .all() - ) - total_deleted = 0 - for check_id, check_version in check_ids: - query = request.db.query(MalwareVerdict).filter( - MalwareVerdict.check_id == check_id - ) - num_verdicts = query.count() - request.log.info( - "Removing %d malware verdicts associated with %s version %d." - % (num_verdicts, check_name, check_version) - ) - total_deleted += query.delete(synchronize_session=False) - - # This returned value is only relevant for testing. - return total_deleted diff --git a/warehouse/malware/utils.py b/warehouse/malware/utils.py deleted file mode 100644 --- a/warehouse/malware/utils.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from collections import defaultdict - -from warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareCheckType - - -def get_check_fields(check): - result = {"name": check.__name__} - - required_fields = ["short_description", "long_description", "version", "check_type"] - for field in required_fields: - result[field] = getattr(check, field) - - if result["check_type"] == "event_hook": - result["hooked_object"] = check.hooked_object - - if result["check_type"] == "scheduled": - result["schedule"] = check.schedule - - return result - - -def get_enabled_hooked_checks(session): - checks = ( - session.query(MalwareCheck.name, MalwareCheck.hooked_object) - .filter(MalwareCheck.check_type == MalwareCheckType.EventHook) - .filter(MalwareCheck.state == MalwareCheckState.Enabled) - .all() - ) - results = defaultdict(set) - - for check_name, object_type in checks: - results[object_type.value].add(check_name) - - return results diff --git a/warehouse/migrations/versions/ab536b1853f0_remove_malware_infrastructure.py b/warehouse/migrations/versions/ab536b1853f0_remove_malware_infrastructure.py new file mode 100644 --- /dev/null +++ b/warehouse/migrations/versions/ab536b1853f0_remove_malware_infrastructure.py @@ -0,0 +1,184 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +remove malware infrastructure + +Revision ID: ab536b1853f0 +Revises: c5f718cb98ac +Create Date: 2023-05-19 13:46:26.093351 +""" + +import citext +import sqlalchemy as sa + +from alembic import op +from sqlalchemy.dialects import postgresql + +revision = "ab536b1853f0" +down_revision = "c5f718cb98ac" + + +def upgrade(): + op.drop_index("ix_malware_verdicts_check_id", table_name="malware_verdicts") + op.drop_table("malware_verdicts") + op.drop_table("malware_checks") + + +def downgrade(): + op.create_table( + "malware_verdicts", + sa.Column( + "id", + postgresql.UUID(), + server_default=sa.text("gen_random_uuid()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "run_date", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column("check_id", postgresql.UUID(), autoincrement=False, nullable=False), + sa.Column("file_id", postgresql.UUID(), autoincrement=False, nullable=True), + sa.Column("project_id", postgresql.UUID(), autoincrement=False, nullable=True), + sa.Column("release_id", postgresql.UUID(), autoincrement=False, nullable=True), + sa.Column( + "classification", + postgresql.ENUM( + "threat", "indeterminate", "benign", name="verdictclassification" + ), + autoincrement=False, + nullable=False, + ), + sa.Column( + "confidence", + postgresql.ENUM("low", "medium", "high", name="verdictconfidence"), + autoincrement=False, + nullable=False, + ), + sa.Column("message", sa.TEXT(), autoincrement=False, nullable=True), + sa.Column( + "details", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=True, + ), + sa.Column( + "manually_reviewed", + sa.BOOLEAN(), + server_default=sa.text("false"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "reviewer_verdict", + postgresql.ENUM( + "threat", "indeterminate", "benign", name="verdictclassification" + ), + autoincrement=False, + nullable=True, + ), + sa.Column("full_report_link", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.ForeignKeyConstraint( + ["check_id"], + ["malware_checks.id"], + name="malware_verdicts_check_id_fkey", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["file_id"], + ["release_files.id"], + name="malware_verdicts_file_id_fkey", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_id"], + ["projects.id"], + name="malware_verdicts_project_id_fkey", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["release_id"], + ["releases.id"], + name="malware_verdicts_release_id_fkey", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id", name="malware_verdicts_pkey"), + ) + op.create_index( + "ix_malware_verdicts_check_id", "malware_verdicts", ["check_id"], unique=False + ) + op.create_table( + "malware_checks", + sa.Column( + "id", + postgresql.UUID(), + server_default=sa.text("gen_random_uuid()"), + autoincrement=False, + nullable=False, + ), + sa.Column("name", citext.CIText(), autoincrement=False, nullable=False), + sa.Column("version", sa.INTEGER(), autoincrement=False, nullable=False), + sa.Column( + "short_description", + sa.VARCHAR(length=128), + autoincrement=False, + nullable=False, + ), + sa.Column("long_description", sa.TEXT(), autoincrement=False, nullable=False), + sa.Column( + "check_type", + postgresql.ENUM("event_hook", "scheduled", name="malwarechecktypes"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "hooked_object", + postgresql.ENUM( + "File", "Release", "Project", name="malwarecheckobjecttype" + ), + autoincrement=False, + 
nullable=True, + ), + sa.Column( + "schedule", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=True, + ), + sa.Column( + "state", + postgresql.ENUM( + "enabled", + "evaluation", + "disabled", + "wiped_out", + name="malwarecheckstate", + ), + server_default=sa.text("'disabled'::malwarecheckstate"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "created", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.PrimaryKeyConstraint("id", name="malware_checks_pkey"), + sa.UniqueConstraint("name", "version", name="malware_checks_name_version_key"), + ) diff --git a/warehouse/tasks.py b/warehouse/tasks.py --- a/warehouse/tasks.py +++ b/warehouse/tasks.py @@ -209,11 +209,8 @@ def includeme(config): task_default_queue="default", task_default_routing_key="task.default", task_queue_ha_policy="all", - task_queues=( - Queue("default", routing_key="task.#"), - Queue("malware", routing_key="malware.#"), - ), - task_routes={"warehouse.malware.tasks.*": {"queue": "malware"}}, + task_queues=(Queue("default", routing_key="task.#"),), + task_routes={}, task_serializer="json", worker_disable_rate_limits=True, REDBEAT_REDIS_URL=s["celery.scheduler_url"],
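Context for the warehouse/tasks.py hunk above: with the dedicated "malware" Queue and its wildcard task_routes entry removed, Celery collapses to the single "default" queue. The sketch below reconstructs the pre-patch two-queue topology from the removed lines; it is a minimal illustration assuming a standalone Celery app — the bare "app" object and its name are hypothetical stand-ins, not part of this patch or of warehouse's actual wiring.

# Minimal sketch, assuming a standalone Celery app (hypothetical "app").
from celery import Celery
from kombu import Queue

app = Celery("warehouse")
app.conf.update(
    task_default_queue="default",
    task_default_routing_key="task.default",
    # Pre-patch topology taken from the removed lines: malware tasks were
    # routed to their own queue so those workers could be scaled and
    # drained independently of the "default" queue.
    task_queues=(
        Queue("default", routing_key="task.#"),
        Queue("malware", routing_key="malware.#"),
    ),
    task_routes={"warehouse.malware.tasks.*": {"queue": "malware"}},
)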
diff --git a/tests/common/checks/__init__.py b/tests/common/checks/__init__.py deleted file mode 100644 --- a/tests/common/checks/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .hooked import ExampleHookedCheck # noqa -from .scheduled import ExampleScheduledCheck # noqa diff --git a/tests/common/checks/hooked.py b/tests/common/checks/hooked.py deleted file mode 100644 --- a/tests/common/checks/hooked.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from warehouse.malware.checks.base import MalwareCheckBase -from warehouse.malware.errors import FatalCheckError -from warehouse.malware.models import VerdictClassification, VerdictConfidence - - -class ExampleHookedCheck(MalwareCheckBase): - version = 1 - short_description = "An example hook-based check" - long_description = "The purpose of this check is to test the \ -implementation of a hook-based check. This check will generate verdicts if enabled." - check_type = "event_hook" - hooked_object = "File" - - def __init__(self, db): - super().__init__(db) - - def scan(self, **kwargs): - file_id = kwargs.get("obj_id") - if file_id is None: - raise FatalCheckError("Missing required kwarg `obj_id`") - - self.add_verdict( - file_id=file_id, - classification=VerdictClassification.Benign, - confidence=VerdictConfidence.High, - message="Nothing to see here!", - ) diff --git a/tests/common/checks/scheduled.py b/tests/common/checks/scheduled.py deleted file mode 100644 --- a/tests/common/checks/scheduled.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from warehouse.malware.checks.base import MalwareCheckBase -from warehouse.malware.models import VerdictClassification, VerdictConfidence -from warehouse.packaging.models import Project - - -class ExampleScheduledCheck(MalwareCheckBase): - version = 1 - short_description = "An example scheduled check" - long_description = "The purpose of this check is to test the \ -implementation of a scheduled check. 
This check will generate verdicts if enabled." - check_type = "scheduled" - schedule = {"minute": "0", "hour": "*/8"} - - def __init__(self, db): - super().__init__(db) - - def scan(self, **kwargs): - project = self.db.query(Project).first() - self.add_verdict( - project_id=project.id, - classification=VerdictClassification.Benign, - confidence=VerdictConfidence.High, - message="Nothing to see here!", - ) diff --git a/tests/common/db/malware.py b/tests/common/db/malware.py deleted file mode 100644 --- a/tests/common/db/malware.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -import factory - -from warehouse.malware.models import ( - MalwareCheck, - MalwareCheckObjectType, - MalwareCheckState, - MalwareCheckType, - MalwareVerdict, - VerdictClassification, - VerdictConfidence, -) - -from .base import WarehouseFactory -from .packaging import FileFactory - - -class MalwareCheckFactory(WarehouseFactory): - class Meta: - model = MalwareCheck - - name = factory.Faker("pystr", max_chars=12) - version = 1 - short_description = factory.Faker("sentence", nb_words=6) - long_description = factory.Faker("sentence", nb_words=12) - check_type = factory.Faker("random_element", elements=list(MalwareCheckType)) - hooked_object = factory.Faker( - "random_element", elements=list(MalwareCheckObjectType) - ) - schedule = {"minute": "*/10"} - state = factory.Faker("random_element", elements=list(MalwareCheckState)) - created = factory.Faker( - "date_time_between_dates", - datetime_start=datetime.datetime.utcnow() - datetime.timedelta(days=7), - ) - - -class MalwareVerdictFactory(WarehouseFactory): - class Meta: - model = MalwareVerdict - - check = factory.SubFactory(MalwareCheckFactory) - release_file = factory.SubFactory(FileFactory) - release = None - project = None - manually_reviewed = True - reviewer_verdict = factory.Faker( - "random_element", elements=list(VerdictClassification) - ) - classification = factory.Faker( - "random_element", elements=list(VerdictClassification) - ) - confidence = factory.Faker("random_element", elements=list(VerdictConfidence)) - message = factory.Faker("paragraph") - run_date = factory.Faker("date_time_between_dates") - full_report_link = None - details = None diff --git a/tests/conftest.py b/tests/conftest.py --- a/tests/conftest.py +++ b/tests/conftest.py @@ -289,9 +289,6 @@ def app_config(database): "sponsorlogos.backend": "warehouse.admin.services.LocalSponsorLogoStorage", "billing.backend": "warehouse.subscriptions.services.MockStripeBillingService", "mail.backend": "warehouse.email.services.SMTPEmailSender", - "malware_check.backend": ( - "warehouse.malware.services.PrinterMalwareCheckService" - ), "files.url": "http://localhost:7000/", "archive_files.url": "http://localhost:7000/archive", "sessions.secret": "123456", diff --git a/tests/unit/admin/test_routes.py b/tests/unit/admin/test_routes.py --- a/tests/unit/admin/test_routes.py +++ b/tests/unit/admin/test_routes.py @@ -188,29 +188,6 @@ def test_includeme(): ), 
pretend.call("admin.flags", "/admin/flags/", domain=warehouse), pretend.call("admin.flags.edit", "/admin/flags/edit/", domain=warehouse), - pretend.call("admin.checks.list", "/admin/checks/", domain=warehouse), - pretend.call( - "admin.checks.detail", "/admin/checks/{check_name}", domain=warehouse - ), - pretend.call( - "admin.checks.change_state", - "/admin/checks/{check_name}/change_state", - domain=warehouse, - ), - pretend.call( - "admin.checks.run_evaluation", - "/admin/checks/{check_name}/run_evaluation", - domain=warehouse, - ), - pretend.call("admin.verdicts.list", "/admin/verdicts/", domain=warehouse), - pretend.call( - "admin.verdicts.detail", "/admin/verdicts/{verdict_id}", domain=warehouse - ), - pretend.call( - "admin.verdicts.review", - "/admin/verdicts/{verdict_id}/review", - domain=warehouse, - ), pretend.call( "admin.sponsor.list", "/admin/sponsors/", diff --git a/tests/unit/admin/views/test_checks.py b/tests/unit/admin/views/test_checks.py deleted file mode 100644 --- a/tests/unit/admin/views/test_checks.py +++ /dev/null @@ -1,215 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pretend -import pytest - -from pyramid.httpexceptions import HTTPNotFound - -from warehouse.admin.views import checks as views -from warehouse.malware.models import MalwareCheckState, MalwareCheckType -from warehouse.malware.tasks import backfill, run_scheduled_check - -from ....common.db.malware import MalwareCheckFactory - - -class TestListChecks: - def test_get_checks_none(self, db_request): - assert views.get_checks(db_request) == {"checks": []} - - def test_get_checks(self, db_request): - checks = [MalwareCheckFactory.create() for _ in range(10)] - result = views.get_checks(db_request)["checks"] - assert len(result) == len(checks) - for r in result: - assert r in checks - - def test_get_checks_different_versions(self, db_request): - checks = [MalwareCheckFactory.create() for _ in range(5)] - checks_same = [ - MalwareCheckFactory.create(name="MyCheck", version=i) for i in range(1, 6) - ] - checks.append(checks_same[-1]) - result = views.get_checks(db_request)["checks"] - assert len(result) == len(checks) - for r in result: - assert r in checks - - -class TestGetCheck: - def test_get_check(self, db_request): - check = MalwareCheckFactory.create() - db_request.matchdict["check_name"] = check.name - assert views.get_check(db_request) == { - "check": check, - "checks": [check], - "states": MalwareCheckState, - "evaluation_run_size": 10000, - } - - def test_get_check_many_versions(self, db_request): - check1 = MalwareCheckFactory.create(name="MyCheck", version="1") - check2 = MalwareCheckFactory.create(name="MyCheck", version="2") - db_request.matchdict["check_name"] = check1.name - assert views.get_check(db_request) == { - "check": check2, - "checks": [check2, check1], - "states": MalwareCheckState, - "evaluation_run_size": 10000, - } - - def test_get_check_not_found(self, db_request): - db_request.matchdict["check_name"] = "DoesNotExist" - with pytest.raises(HTTPNotFound): - 
views.get_check(db_request) - - -class TestChangeCheckState: - def test_no_check_state(self, db_request): - check = MalwareCheckFactory.create() - db_request.matchdict["check_name"] = check.name - with pytest.raises(HTTPNotFound): - views.change_check_state(db_request) - - @pytest.mark.parametrize( - ("final_state"), [MalwareCheckState.Disabled, MalwareCheckState.WipedOut] - ) - def test_change_to_valid_state(self, db_request, final_state): - check = MalwareCheckFactory.create( - name="MyCheck", state=MalwareCheckState.Disabled - ) - - db_request.POST = {"check_state": final_state.value} - db_request.matchdict["check_name"] = check.name - - db_request.session = pretend.stub( - flash=pretend.call_recorder(lambda *a, **kw: None) - ) - wipe_out_recorder = pretend.stub( - delay=pretend.call_recorder(lambda *a, **kw: None) - ) - db_request.task = pretend.call_recorder(lambda *a, **kw: wipe_out_recorder) - - db_request.route_path = pretend.call_recorder( - lambda *a, **kw: "/admin/checks/MyCheck/change_state" - ) - - views.change_check_state(db_request) - - assert db_request.session.flash.calls == [ - pretend.call( - "Changed 'MyCheck' check to '%s'!" % final_state.value, queue="success" - ) - ] - - assert check.state == final_state - - if final_state == MalwareCheckState.WipedOut: - assert wipe_out_recorder.delay.calls == [pretend.call("MyCheck")] - - def test_change_to_invalid_state(self, db_request): - check = MalwareCheckFactory.create(name="MyCheck") - initial_state = check.state - invalid_check_state = "cancelled" - db_request.POST = {"check_state": invalid_check_state} - db_request.matchdict["check_name"] = check.name - - db_request.session = pretend.stub( - flash=pretend.call_recorder(lambda *a, **kw: None) - ) - db_request.route_path = pretend.call_recorder( - lambda *a, **kw: "/admin/checks/MyCheck/change_state" - ) - - views.change_check_state(db_request) - - assert db_request.session.flash.calls == [ - pretend.call("Invalid check state provided.", queue="error") - ] - assert check.state == initial_state - - -class TestRunEvaluation: - @pytest.mark.parametrize( - ("check_state", "message"), - [ - ( - MalwareCheckState.Disabled, - "Check must be in 'enabled' or 'evaluation' state to manually execute.", - ), - ( - MalwareCheckState.WipedOut, - "Check must be in 'enabled' or 'evaluation' state to manually execute.", - ), - ], - ) - def test_invalid_backfill_parameters(self, db_request, check_state, message): - check = MalwareCheckFactory.create(state=check_state) - db_request.matchdict["check_name"] = check.name - - db_request.session = pretend.stub( - flash=pretend.call_recorder(lambda *a, **kw: None) - ) - - db_request.route_path = pretend.call_recorder( - lambda *a, **kw: "/admin/checks/%s/run_evaluation" % check.name - ) - - views.run_evaluation(db_request) - - assert db_request.session.flash.calls == [pretend.call(message, queue="error")] - - @pytest.mark.parametrize( - ("check_type"), [MalwareCheckType.EventHook, MalwareCheckType.Scheduled] - ) - def test_success(self, db_request, check_type): - check = MalwareCheckFactory.create( - check_type=check_type, state=MalwareCheckState.Enabled - ) - db_request.matchdict["check_name"] = check.name - - db_request.session = pretend.stub( - flash=pretend.call_recorder(lambda *a, **kw: None) - ) - - db_request.route_path = pretend.call_recorder( - lambda *a, **kw: "/admin/checks/%s/run_evaluation" % check.name - ) - - backfill_recorder = pretend.stub( - delay=pretend.call_recorder(lambda *a, **kw: None) - ) - - db_request.task = 
pretend.call_recorder(lambda *a, **kw: backfill_recorder) - - views.run_evaluation(db_request) - - if check_type == MalwareCheckType.EventHook: - assert db_request.session.flash.calls == [ - pretend.call( - "Running %s on 10000 %ss!" - % (check.name, check.hooked_object.value), - queue="success", - ) - ] - assert db_request.task.calls == [pretend.call(backfill)] - assert backfill_recorder.delay.calls == [pretend.call(check.name, 10000)] - elif check_type == MalwareCheckType.Scheduled: - assert db_request.session.flash.calls == [ - pretend.call("Running %s now!" % check.name, queue="success") - ] - assert db_request.task.calls == [pretend.call(run_scheduled_check)] - assert backfill_recorder.delay.calls == [ - pretend.call(check.name, manually_triggered=True) - ] - else: - raise Exception("Invalid check type: %s" % check_type) diff --git a/tests/unit/admin/views/test_verdicts.py b/tests/unit/admin/views/test_verdicts.py deleted file mode 100644 --- a/tests/unit/admin/views/test_verdicts.py +++ /dev/null @@ -1,294 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -from random import randint - -import pretend -import pytest - -from paginate_sqlalchemy import SqlalchemyOrmPage as SQLAlchemyORMPage -from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound - -from warehouse.admin.views import verdicts as views -from warehouse.malware.models import ( - MalwareCheck, - MalwareVerdict, - VerdictClassification, - VerdictConfidence, -) -from warehouse.utils.paginate import paginate_url_factory - -from ....common.db.malware import MalwareCheckFactory, MalwareVerdictFactory - - -class TestListVerdicts: - def test_none(self, db_request): - assert views.get_verdicts(db_request) == { - "verdicts": [], - "check_names": set(), - "classifications": {"threat", "indeterminate", "benign"}, - "confidences": {"low", "medium", "high"}, - } - - def test_some(self, db_request): - check = MalwareCheckFactory.create() - for _ in range(10): - MalwareVerdictFactory.create(check=check) - - query = db_request.db.query(MalwareVerdict).order_by( - MalwareVerdict.run_date.desc() - ) - - verdicts = SQLAlchemyORMPage( - query, - page=1, - items_per_page=25, - url_maker=paginate_url_factory(db_request), - ) - - assert views.get_verdicts(db_request) == { - "verdicts": verdicts, - "check_names": {check.name}, - "classifications": {"threat", "indeterminate", "benign"}, - "confidences": {"low", "medium", "high"}, - } - - def test_some_with_multipage(self, db_request): - check1 = MalwareCheckFactory.create() - check2 = MalwareCheckFactory.create() - for _ in range(60): - MalwareVerdictFactory.create(check=check2) - - db_request.GET["page"] = "2" - - query = db_request.db.query(MalwareVerdict).order_by( - MalwareVerdict.run_date.desc() - ) - verdicts = SQLAlchemyORMPage( - query, - page=2, - items_per_page=25, - url_maker=paginate_url_factory(db_request), - ) - - assert views.get_verdicts(db_request) == { - "verdicts": verdicts, - "check_names": {check1.name, check2.name}, - "classifications": {"threat", 
"indeterminate", "benign"}, - "confidences": {"low", "medium", "high"}, - } - - @pytest.mark.parametrize("check_name", ["check0", "check1", ""]) - def test_check_name_filter(self, db_request, check_name): - for i in range(3): - check = MalwareCheckFactory.create(name="check%d" % i) - for _ in range(5): - MalwareVerdictFactory.create(check=check) - - query = db_request.db.query(MalwareVerdict) - if check_name: - query = query.join(MalwareCheck).filter(MalwareCheck.name == check_name) - query = query.order_by(MalwareVerdict.run_date.desc()) - - verdicts = SQLAlchemyORMPage( - query, - page=1, - items_per_page=25, - url_maker=paginate_url_factory(db_request), - ) - - response = { - "verdicts": verdicts, - "check_names": {"check0", "check1", "check2"}, - "classifications": {"threat", "indeterminate", "benign"}, - "confidences": {"low", "medium", "high"}, - } - - db_request.GET["check_name"] = check_name - assert views.get_verdicts(db_request) == response - - @pytest.mark.parametrize( - "classification", ["benign", "indeterminate", "threat", ""] - ) - def test_classification_filter(self, db_request, classification): - check = MalwareCheckFactory.create() - for c in VerdictClassification: - for _ in range(5): - MalwareVerdictFactory.create(check=check, classification=c) - - db_request.GET["classification"] = classification - - query = db_request.db.query(MalwareVerdict) - if classification: - query = query.filter(MalwareVerdict.classification == classification) - query = query.order_by(MalwareVerdict.run_date.desc()) - - verdicts = SQLAlchemyORMPage( - query, - page=1, - items_per_page=25, - url_maker=paginate_url_factory(db_request), - ) - - response = { - "verdicts": verdicts, - "check_names": {check.name}, - "classifications": {"threat", "indeterminate", "benign"}, - "confidences": {"low", "medium", "high"}, - } - assert views.get_verdicts(db_request) == response - - @pytest.mark.parametrize("confidence", ["low", "medium", "high", ""]) - def test_confidence_filter(self, db_request, confidence): - check = MalwareCheckFactory.create() - for c in VerdictConfidence: - for _ in range(5): - MalwareVerdictFactory.create(check=check, confidence=c) - - db_request.GET["confidence"] = confidence - - query = db_request.db.query(MalwareVerdict) - if confidence: - query = query.filter(MalwareVerdict.confidence == confidence) - query = query.order_by(MalwareVerdict.run_date.desc()) - - verdicts = SQLAlchemyORMPage( - query, - page=1, - items_per_page=25, - url_maker=paginate_url_factory(db_request), - ) - - response = { - "verdicts": verdicts, - "check_names": {check.name}, - "classifications": {"threat", "indeterminate", "benign"}, - "confidences": {"low", "medium", "high"}, - } - - assert views.get_verdicts(db_request) == response - - @pytest.mark.parametrize("manually_reviewed", [1, 0]) - def test_manually_reviewed_filter(self, db_request, manually_reviewed): - check = MalwareCheckFactory.create() - for _ in range(5): - MalwareVerdictFactory.create( - check=check, manually_reviewed=bool(manually_reviewed) - ) - - # Create other verdicts to ensure filter works properly - for _ in range(10): - MalwareVerdictFactory.create( - check=check, manually_reviewed=not bool(manually_reviewed) - ) - - db_request.GET["manually_reviewed"] = str(manually_reviewed) - - query = ( - db_request.db.query(MalwareVerdict) - .filter(MalwareVerdict.manually_reviewed == bool(manually_reviewed)) - .order_by(MalwareVerdict.run_date.desc()) - ) - - verdicts = SQLAlchemyORMPage( - query, - page=1, - items_per_page=25, - 
url_maker=paginate_url_factory(db_request), - ) - - response = { - "verdicts": verdicts, - "check_names": {check.name}, - "classifications": {"threat", "indeterminate", "benign"}, - "confidences": {"low", "medium", "high"}, - } - - assert views.get_verdicts(db_request) == response - - @pytest.mark.parametrize( - "invalid_param", - [ - ("page", "invalid"), - ("check_name", "NotACheck"), - ("confidence", "NotAConfidence"), - ("classification", "NotAClassification"), - ("manually_reviewed", "False"), - ], - ) - def test_errors(self, db_request, invalid_param): - db_request.GET[invalid_param[0]] = invalid_param[1] - with pytest.raises(HTTPBadRequest): - views.get_verdicts(db_request) - - -class TestGetVerdict: - def test_found(self, db_request): - verdicts = [MalwareVerdictFactory.create() for _ in range(10)] - index = randint(0, 9) - lookup_id = verdicts[index].id - db_request.matchdict["verdict_id"] = lookup_id - - assert views.get_verdict(db_request) == { - "verdict": verdicts[index], - "classifications": ["Benign", "Indeterminate", "Threat"], - } - - def test_not_found(self, db_request): - db_request.matchdict["verdict_id"] = uuid.uuid4() - - with pytest.raises(HTTPNotFound): - views.get_verdict(db_request) - - -class TestReviewVerdict: - @pytest.mark.parametrize( - "manually_reviewed, reviewer_verdict", - [ - (False, None), # unreviewed verdict - (True, VerdictClassification.Threat), # previously reviewed - ], - ) - def test_set_classification(self, db_request, manually_reviewed, reviewer_verdict): - verdict = MalwareVerdictFactory.create( - manually_reviewed=manually_reviewed, reviewer_verdict=reviewer_verdict - ) - - db_request.matchdict["verdict_id"] = verdict.id - db_request.POST = {"classification": "Benign"} - db_request.session = pretend.stub( - flash=pretend.call_recorder(lambda *a, **kw: None) - ) - - db_request.route_path = pretend.call_recorder( - lambda *a, **kw: "/admin/verdicts/%s/review" % verdict.id - ) - - views.review_verdict(db_request) - - assert db_request.session.flash.calls == [ - pretend.call("Verdict %s marked as reviewed." % verdict.id, queue="success") - ] - - assert verdict.manually_reviewed - assert verdict.reviewer_verdict == VerdictClassification.Benign - - @pytest.mark.parametrize("post_params", [{}, {"classification": "Nope"}]) - def test_errors(self, db_request, post_params): - verdict = MalwareVerdictFactory.create() - db_request.matchdict["verdict_id"] = verdict.id - db_request.POST = post_params - - with pytest.raises(HTTPBadRequest): - views.review_verdict(db_request) diff --git a/tests/unit/cli/test_malware.py b/tests/unit/cli/test_malware.py deleted file mode 100644 --- a/tests/unit/cli/test_malware.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pretend - -from warehouse.cli.malware import sync_checks -from warehouse.malware.tasks import sync_checks as _sync_checks - - -class TestCLIMalware: - def test_sync_checks(self, cli): - request = pretend.stub() - task = pretend.stub( - get_request=pretend.call_recorder(lambda *a, **kw: request), - run=pretend.call_recorder(lambda *a, **kw: None), - ) - config = pretend.stub(task=pretend.call_recorder(lambda *a, **kw: task)) - - result = cli.invoke(sync_checks, obj=config) - - assert result.exit_code == 0 - assert config.task.calls == [ - pretend.call(_sync_checks), - pretend.call(_sync_checks), - ] - assert task.get_request.calls == [pretend.call()] - assert task.run.calls == [pretend.call(request)] diff --git a/tests/unit/malware/__init__.py b/tests/unit/malware/__init__.py deleted file mode 100644 --- a/tests/unit/malware/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/unit/malware/checks/__init__.py b/tests/unit/malware/checks/__init__.py deleted file mode 100644 --- a/tests/unit/malware/checks/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/unit/malware/checks/package_turnover/__init__.py b/tests/unit/malware/checks/package_turnover/__init__.py deleted file mode 100644 --- a/tests/unit/malware/checks/package_turnover/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/unit/malware/checks/package_turnover/test_check.py b/tests/unit/malware/checks/package_turnover/test_check.py deleted file mode 100644 --- a/tests/unit/malware/checks/package_turnover/test_check.py +++ /dev/null @@ -1,182 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pretend - -from warehouse.events.tags import EventTag -from warehouse.malware.checks.package_turnover import check as c -from warehouse.malware.models import ( - MalwareCheckState, - VerdictClassification, - VerdictConfidence, -) - -from .....common.db.accounts import UserFactory -from .....common.db.malware import MalwareCheckFactory -from .....common.db.packaging import ProjectFactory, ReleaseFactory, RoleFactory - - -def test_initializes(db_session): - check_model = MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - assert check.id == check_model.id - - -def test_user_posture_verdicts(db_session): - user = UserFactory.create() - project = pretend.stub(users=[user], id=pretend.stub()) - - MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - user.record_event( - tag=EventTag.Account.TwoFactorMethodRemoved, ip_address="0.0.0.0", additional={} - ) - - check.user_posture_verdicts(project) - assert len(check._verdicts) == 1 - assert check._verdicts[0].check_id == check.id - assert check._verdicts[0].project_id == project.id - assert check._verdicts[0].classification == VerdictClassification.Threat - assert check._verdicts[0].confidence == VerdictConfidence.High - assert ( - check._verdicts[0].message - == "User with control over this package has disabled 2FA" - ) - - -def test_user_posture_verdicts_hasnt_removed_2fa(db_session): - user = UserFactory.create() - project = pretend.stub(users=[user], id=pretend.stub()) - - MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - check.user_posture_verdicts(project) - assert len(check._verdicts) == 0 - - -def test_user_posture_verdicts_has_2fa(db_session): - user = UserFactory.create(totp_secret=b"fake secret") - project = pretend.stub(users=[user], id=pretend.stub()) - - MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - user.record_event( - tag=EventTag.Account.TwoFactorMethodRemoved, ip_address="0.0.0.0", additional={} - ) - - check.user_posture_verdicts(project) - assert len(check._verdicts) == 0 - - -def test_user_turnover_verdicts(db_session): - user = UserFactory.create() - project = ProjectFactory.create() - RoleFactory.create(user=user, project=project, role_name="Owner") - - project.record_event( - tag=EventTag.Project.RoleAdd, - ip_address="0.0.0.0", - additional={"target_user": user.username}, - ) - - MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - check.user_turnover_verdicts(project) - assert len(check._verdicts) == 1 - assert check._verdicts[0].check_id == check.id - assert check._verdicts[0].project_id == project.id - assert check._verdicts[0].classification == VerdictClassification.Threat - assert check._verdicts[0].confidence == VerdictConfidence.High - assert 
( - check._verdicts[0].message - == "Suspicious user turnover; all current maintainers are new" - ) - - -def test_user_turnover_verdicts_no_turnover(db_session): - user = UserFactory.create() - project = ProjectFactory.create() - RoleFactory.create(user=user, project=project, role_name="Owner") - - MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - check.user_turnover_verdicts(project) - assert len(check._verdicts) == 0 - - -def test_scan(db_session, monkeypatch): - user = UserFactory.create() - project = ProjectFactory.create() - RoleFactory.create(user=user, project=project, role_name="Owner") - - for _ in range(3): - ReleaseFactory.create(project=project) - - MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - monkeypatch.setattr( - check, "user_posture_verdicts", pretend.call_recorder(lambda project: None) - ) - monkeypatch.setattr( - check, "user_turnover_verdicts", pretend.call_recorder(lambda project: None) - ) - - check.scan() - - # Each verdict rendering method is only called once per project, - # thanks to deduplication. - assert check.user_posture_verdicts.calls == [pretend.call(project)] - assert check.user_turnover_verdicts.calls == [pretend.call(project)] - - -def test_scan_too_few_releases(db_session, monkeypatch): - user = UserFactory.create() - project = ProjectFactory.create() - RoleFactory.create(user=user, project=project, role_name="Owner") - ReleaseFactory.create(project=project) - - MalwareCheckFactory.create( - name="PackageTurnoverCheck", state=MalwareCheckState.Enabled - ) - check = c.PackageTurnoverCheck(db_session) - - monkeypatch.setattr( - check, "user_posture_verdicts", pretend.call_recorder(lambda project: None) - ) - monkeypatch.setattr( - check, "user_turnover_verdicts", pretend.call_recorder(lambda project: None) - ) - - check.scan() - assert check.user_posture_verdicts.calls == [] - assert check.user_turnover_verdicts.calls == [] diff --git a/tests/unit/malware/checks/setup_patterns/__init__.py b/tests/unit/malware/checks/setup_patterns/__init__.py deleted file mode 100644 --- a/tests/unit/malware/checks/setup_patterns/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/unit/malware/checks/setup_patterns/test_check.py b/tests/unit/malware/checks/setup_patterns/test_check.py deleted file mode 100644 --- a/tests/unit/malware/checks/setup_patterns/test_check.py +++ /dev/null @@ -1,1158 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pretend -import pytest -import yara - -from warehouse.malware.checks.setup_patterns import check as c -from warehouse.malware.models import ( - MalwareCheckState, - VerdictClassification, - VerdictConfidence, -) - -from .....common.db.malware import MalwareCheckFactory -from .....common.db.packaging import FileFactory - - -def test_initializes(db_session): - check_model = MalwareCheckFactory.create( - name="SetupPatternCheck", state=MalwareCheckState.Enabled - ) - check = c.SetupPatternCheck(db_session) - - assert check.id == check_model.id - assert isinstance(check._yara_rules, yara.Rules) - - [email protected]( - ("obj", "file_url"), [(None, pretend.stub()), (pretend.stub(), None)] -) -def test_scan_missing_kwargs(db_session, obj, file_url): - MalwareCheckFactory.create( - name="SetupPatternCheck", state=MalwareCheckState.Enabled - ) - check = c.SetupPatternCheck(db_session) - with pytest.raises(c.FatalCheckError): - check.scan(obj=obj, file_url=file_url) - - -def test_scan_non_sdist(db_session): - MalwareCheckFactory.create( - name="SetupPatternCheck", state=MalwareCheckState.Enabled - ) - check = c.SetupPatternCheck(db_session) - - file = FileFactory.create(packagetype="bdist_wheel") - - check.scan(obj=file, file_url=pretend.stub()) - - assert check._verdicts == [] - - -def test_scan_no_setup_contents(db_session, monkeypatch): - monkeypatch.setattr( - c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub()) - ) - monkeypatch.setattr( - c, "extract_file_content", pretend.call_recorder(lambda *a: None) - ) - - MalwareCheckFactory.create( - name="SetupPatternCheck", state=MalwareCheckState.Enabled - ) - check = c.SetupPatternCheck(db_session) - - file = FileFactory.create(packagetype="sdist") - - check.scan(obj=file, file_url=pretend.stub()) - - assert len(check._verdicts) == 1 - assert check._verdicts[0].check_id == check.id - assert check._verdicts[0].file_id == file.id - assert check._verdicts[0].classification == VerdictClassification.Indeterminate - assert check._verdicts[0].confidence == VerdictConfidence.High - assert ( - check._verdicts[0].message - == "sdist does not contain a suitable setup.py for analysis" - ) - - [email protected]("benign", ["", """from os import path"""]) -def test_scan_benign_contents(db_session, monkeypatch, benign): - monkeypatch.setattr( - c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub()) - ) - monkeypatch.setattr( - c, - "extract_file_content", - pretend.call_recorder( - lambda *a: b"this is a benign string\n" + benign.encode("utf-8") - ), - ) - - MalwareCheckFactory.create( - name="SetupPatternCheck", state=MalwareCheckState.Enabled - ) - check = c.SetupPatternCheck(db_session) - - file = FileFactory.create(packagetype="sdist") - - check.scan(obj=file, file_url=pretend.stub()) - - assert len(check._verdicts) == 1 - assert check._verdicts[0].check_id == check.id - assert check._verdicts[0].file_id == file.id - assert check._verdicts[0].classification == VerdictClassification.Benign - assert check._verdicts[0].confidence == VerdictConfidence.Low - assert check._verdicts[0].message == "No malicious 
patterns found in setup.py" - - [email protected]( - "malicious, rule", - [ - # process_spawn_in_setup - ("""os.system('cat /etc/passwd')""", "process_spawn_in_setup"), - ("""os.popen('cat /etc/passwd')""", "process_spawn_in_setup"), - ("""os.popen3('cat /etc/passwd')""", "process_spawn_in_setup"), - ("""os.spawn('cat /etc/passwd')""", "process_spawn_in_setup"), - ("""os.spawnve('cat /etc/passwd')""", "process_spawn_in_setup"), - ("""os.posix_spawn('cat /etc/passwd')""", "process_spawn_in_setup"), - ("""os.posix_spawnp('cat /etc/passwd')""", "process_spawn_in_setup"), - ( - """os.exec('malicious_code')""", - "process_spawn_in_setup:metaprogramming_in_setup", - ), - ("""os.execve('malicious_code')""", "process_spawn_in_setup"), - ( - """ - from os import * - system('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import path, system - system('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import system - system('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import popen - popen('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import popen3 - popen3('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import spawn - spawn('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import spawnve - spawnve('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import posix_spawn - posix_spawn('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import posix_spawnp - posix_spawnp('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - from os import exec - exec('cat /etc/passwd') - """, - "process_spawn_in_setup:metaprogramming_in_setup", - ), - ( - """ - from os import execve - execve('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - # subprocess_in_setup - ("subprocess.run('...')", "subprocess_in_setup"), - ("subprocess.Popen('...')", "subprocess_in_setup"), - ("subprocess.call('...')", "subprocess_in_setup"), - ("subprocess.check_call('...')", "subprocess_in_setup"), - ("subprocess.check_output('...')", "subprocess_in_setup"), - ( - """ - from subprocess import run - run('...') - """, - "subprocess_in_setup", - ), - ( - """ - from subprocess import Popen - Popen('...') - """, - "subprocess_in_setup", - ), - ( - """ - from subprocess import call - call('...') - """, - "subprocess_in_setup", - ), - ( - """ - from subprocess import check_call - check_call('...') - """, - "subprocess_in_setup", - ), - ( - """ - from subprocess import check_output - check_output('...') - """, - "subprocess_in_setup", - ), - # networking_in_setup - ("from socket import something", "networking_in_setup"), - ("from socket.something import something", "networking_in_setup"), - ("import socket", "networking_in_setup"), - ("from socketserver import something", "networking_in_setup"), - ("from socketserver.something import something", "networking_in_setup"), - ("import socketserver", "networking_in_setup"), - ("from ssl import something", "networking_in_setup"), - ("from ssl.something import something", "networking_in_setup"), - ("import ssl", "networking_in_setup"), - ("from ftplib import something", "networking_in_setup"), - ("from http.something import something", "networking_in_setup"), - ("import http", "networking_in_setup"), - ("import http", "networking_in_setup"), - ("from urllib import something", "networking_in_setup"), - ("from urllib.something import something", 
"networking_in_setup"), - ("import urllib", "networking_in_setup"), - ("from xmlrpc import something", "networking_in_setup"), - ("from xmlrpc.something import something", "networking_in_setup"), - ("import xmlrpc", "networking_in_setup"), - # deserialization_in_setup - ("from pickle import something", "deserialization_in_setup"), - ("from pickle.something import something", "deserialization_in_setup"), - ("import pickle", "deserialization_in_setup"), - ("from base64 import something", "deserialization_in_setup"), - ("from base64.something import something", "deserialization_in_setup"), - ("import base64", "deserialization_in_setup"), - ("from binhex import something", "deserialization_in_setup"), - ("from binhex.something import something", "deserialization_in_setup"), - ("import binhex", "deserialization_in_setup"), - # metaprogramming_in_setup - ("from inspect import something", "metaprogramming_in_setup"), - ("from inspect.something import something", "metaprogramming_in_setup"), - ("import inspect", "metaprogramming_in_setup"), - ("from compileall import something", "metaprogramming_in_setup"), - ("from compileall.something import something", "metaprogramming_in_setup"), - ("import compileall", "metaprogramming_in_setup"), - ("from py_compile import something", "metaprogramming_in_setup"), - ("from py_compile.something import something", "metaprogramming_in_setup"), - ("import py_compile", "metaprogramming_in_setup"), - ("from builtins import something", "metaprogramming_in_setup"), - ("from builtins.something import something", "metaprogramming_in_setup"), - ("import builtins", "metaprogramming_in_setup"), - ("__builtins__.bla", "metaprogramming_in_setup"), - ("from importlib import something", "metaprogramming_in_setup"), - ("from importlib.something import something", "metaprogramming_in_setup"), - ("import importlib", "metaprogramming_in_setup"), - ("__import__('bla')", "metaprogramming_in_setup"), - ("from sys import modules, path", "metaprogramming_in_setup"), - ("from sys import path, modules", "metaprogramming_in_setup"), - ("import sys.modules", "metaprogramming_in_setup"), - ("compile('malicious')", "metaprogramming_in_setup"), - ("dir(someobject)", "metaprogramming_in_setup"), - ("someobject.__dir__()", "metaprogramming_in_setup"), - ("eval('malicious')", "metaprogramming_in_setup"), - ("exec('malicious')", "metaprogramming_in_setup"), - ("getattr(someobject, 'attr')", "metaprogramming_in_setup"), - ("vars(someobject)", "metaprogramming_in_setup"), - ("someobject.__dict__()", "metaprogramming_in_setup"), - ("globals()", "metaprogramming_in_setup"), - ("locals()", "metaprogramming_in_setup"), - ("chr(42)", "metaprogramming_in_setup"), - ("ord('x')", "metaprogramming_in_setup"), - # alias imports - ( - """ - import os as evil - evil.system('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.system('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - hof = lambda evil, scary : evil(scary) - hof(os.system, "cat /etc/passwd") - """, - "process_spawn_in_setup", - ), - ( - """ - import os as evil - evil.popen('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.popen('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os as evil - evil.popen3('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.popen3('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os as 
evil - evil.spawn('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.spawn('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os as evil - evil.spawnve('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.spawnve('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os as evil - evil.posix_spawn('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.posix_spawn('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os as evil - evil.posix_spawnp('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.posix_spawnp('cat /etc/passwd') - """, - "process_spawn_in_setup", - ), - ( - """ - import os as evil - evil.exec('malicious_code') - """, - "process_spawn_in_setup:metaprogramming_in_setup", - ), - ( - """ - import os - evil = os - evil.exec('malicious_code') - """, - "process_spawn_in_setup:metaprogramming_in_setup", - ), - ( - """ - import os as evil - evil.execve('malicious_code') - """, - "process_spawn_in_setup", - ), - ( - """ - import os - evil = os - evil.execve('malicious_code') - """, - "process_spawn_in_setup", - ), - ( - """ - import subprocess as evil - evil.run('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess - evil = subprocess - evil.run('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess as evil - evil.Popen('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess - evil = subprocess - evil.Popen('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess as evil - evil.call('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess - evil = subprocess - evil.call('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess as evil - evil.check_call('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess - evil = subprocess - evil.check_call('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess as evil - evil.check_output('...') - """, - "subprocess_in_setup", - ), - ( - """ - import subprocess - evil = subprocess - evil.check_output('...') - """, - "subprocess_in_setup", - ), - # higher order functions, used for metaprogramming in setup.py - ( - """ - hof = lambda evil : evil('malicious') - hof(compile) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil(scary) - hof(compile, 'malicious') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof('malicious', compile) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof('malicious',compile) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0]('malicious') - hof([compile]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil[0](scary) - hof([compile], 'malicious') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary[0](evil) - hof('malicious', [compile]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']('malicious') - hof({'mean' : compile}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']('malicious') - hof({'mean':compile}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]('malicious') - 
hof({compile:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]('malicious') - hof({compile : 'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil(someobject) - hof(dir) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil(scary) - hof(dir, someobject) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof(someobject, dir) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof(someobject,dir) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0](someobject) - hof([dir]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil[0](scary) - hof([dir], someobject) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary[0](evil) - hof(someobject, [dir]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean'](someobject) - hof({'mean' : dir}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean'](someobject) - hof({'mean':dir}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0](someobject) - hof({dir:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0](someobject) - hof({dir : 'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil('malicious') - hof(eval) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil(scary) - hof(eval, 'malicious') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof('malicious', eval) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof('malicious',eval) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0]('malicious') - hof([eval]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil[0](scary) - hof([eval], 'malicious') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary[0](evil) - hof('malicious', [eval]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']('malicious') - hof({'mean' : eval}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']('malicious') - hof({'mean':eval}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]('malicious') - hof({eval:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]('malicious') - hof({eval : 'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil('malicious') - hof(exec) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil(scary) - hof(exec, 'malicious') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof('malicious', exec) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof('malicious',exec) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0]('malicious') - hof([exec]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil[0](scary) - hof([exec], 'malicious') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary[0](evil) - 
hof('malicious', [exec]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']('malicious') - hof({'mean' : exec}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']('malicious') - hof({'mean':exec}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]('malicious') - hof({exec:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]('malicious') - hof({exec: 'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil(someobject, 'attr') - hof(getattr) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary, unkind : evil(scary, unkind) - hof(getattr, someobject, 'attr') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda unkind, evil, scary : scary(evil, unkind) - hof('attr', someobject, getattr) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda unkind, evil, scary : scary(evil, unkind) - hof('attr', someobject,getattr) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0](someobject, 'attr') - hof([getattr]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary, unkind : evil[0](scary, unkind) - hof([getattr], someobject, 'attr') - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda unkind, evil, scary : scary[0](evil, unkind) - hof('attr', someobject, [getattr]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean'](someobject, 'attr') - hof({'mean' : getattr}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean'](someobject, 'attr') - hof({'mean':getattr}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0](someobject, 'attr') - hof({getattr:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0](someobject, 'attr') - hof({getattr : 'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil(someobject) - hof(vars) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil(scary) - hof(vars, someobject) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof(someobject, vars) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary(evil) - hof(someobject,vars) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0](someobject) - hof([vars]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : evil[0](scary) - hof([vars], someobject) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, scary : scary[0](evil) - hof(someobject, [vars]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean'](someobject) - hof({'mean' : vars}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean'](someobject) - hof({'mean':vars}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0](someobject) - hof({vars:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0](someobject) - hof({vars : 'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil() - hof(globals) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, _scary : evil() - hof(globals, None) - """, - 
"metaprogramming_in_setup", - ), - ( - """ - hof = lambda _evil, scary : scary() - hof(None, globals) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda _evil, scary : scary() - hof(None,globals) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0]() - hof([globals]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']() - hof({'mean' : globals}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']() - hof({'mean':globals}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]() - hof({globals:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]() - hof({globals : 'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil() - hof(locals) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil, _scary : evil() - hof(locals, None) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda _evil, scary : scary() - hof(None, locals) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda _evil, scary : scary() - hof(None,locals) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil[0]() - hof([locals]) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']() - hof({'mean' : locals}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : evil['mean']() - hof({'mean':locals}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]() - hof({locals:'bad'}) - """, - "metaprogramming_in_setup", - ), - ( - """ - hof = lambda evil : list(evil.keys())[0]() - hof({locals : 'bad'}) - """, - "metaprogramming_in_setup", - ), - ], -) -def test_scan_matched_content(db_session, monkeypatch, malicious, rule): - monkeypatch.setattr( - c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub()) - ) - monkeypatch.setattr( - c, - "extract_file_content", - pretend.call_recorder( - lambda *a: b"this looks suspicious:\n" + malicious.encode("utf-8") - ), - ) - - MalwareCheckFactory.create( - name="SetupPatternCheck", state=MalwareCheckState.Enabled - ) - check = c.SetupPatternCheck(db_session) - - file = FileFactory.create(packagetype="sdist") - - check.scan(obj=file, file_url=pretend.stub()) - - assert len(check._verdicts) == 1 - assert check._verdicts[0].check_id == check.id - assert check._verdicts[0].file_id == file.id - threat_rules = {"process_spawn_in_setup", "subprocess_in_setup"} - if set(rule.split(":")) & threat_rules: - assert check._verdicts[0].classification == VerdictClassification.Threat - else: - assert check._verdicts[0].classification == VerdictClassification.Indeterminate - assert check._verdicts[0].confidence == VerdictConfidence.High - assert check._verdicts[0].message == rule diff --git a/tests/unit/malware/checks/test_utils.py b/tests/unit/malware/checks/test_utils.py deleted file mode 100644 --- a/tests/unit/malware/checks/test_utils.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import tarfile -import zipfile - -import pretend - -from warehouse.malware.checks import utils - - -def test_fetch_url_content(monkeypatch): - response = pretend.stub( - raise_for_status=pretend.call_recorder(lambda: None), content=b"fake content" - ) - requests = pretend.stub(get=pretend.call_recorder(lambda url: response)) - - monkeypatch.setattr(utils, "requests", requests) - - io = utils.fetch_url_content("hxxp://fake_url.com") - - assert requests.get.calls == [pretend.call("hxxp://fake_url.com")] - assert response.raise_for_status.calls == [pretend.call()] - assert io.getvalue() == b"fake content" - - -def test_extract_file_contents_zip(): - zipbuf = io.BytesIO() - with zipfile.ZipFile(zipbuf, mode="w") as zipobj: - zipobj.writestr("toplevelgetsskipped", b"nothing to see here") - zipobj.writestr("foo/setup.py", b"these are some contents") - zipbuf.seek(0) - - assert utils.extract_file_content(zipbuf, "setup.py") == b"these are some contents" - - -def test_extract_file_contents_zip_no_file(): - zipbuf = io.BytesIO() - with zipfile.ZipFile(zipbuf, mode="w") as zipobj: - zipobj.writestr("foo/notsetup.py", b"these are some contents") - zipbuf.seek(0) - - assert utils.extract_file_content(zipbuf, "setup.py") is None - - -def test_extract_file_contents_tar(): - tarbuf = io.BytesIO() - with tarfile.open(fileobj=tarbuf, mode="w:gz") as tarobj: - contents = io.BytesIO(b"these are some contents") - member = tarfile.TarInfo(name="foo/setup.py") - member.size = len(contents.getbuffer()) - tarobj.addfile(member, fileobj=contents) - - contents = io.BytesIO(b"nothing to see here") - member = tarfile.TarInfo(name="toplevelgetsskipped") - member.size = len(contents.getbuffer()) - tarobj.addfile(member, fileobj=contents) - tarbuf.seek(0) - - assert utils.extract_file_content(tarbuf, "setup.py") == b"these are some contents" - - -def test_extract_file_contents_tar_empty(): - tarbuf = io.BytesIO(b"invalid tar contents") - - assert utils.extract_file_content(tarbuf, "setup.py") is None - - -def test_extract_file_contents_tar_no_file(): - tarbuf = io.BytesIO() - with tarfile.open(fileobj=tarbuf, mode="w:gz") as tarobj: - contents = io.BytesIO(b"these are some contents") - member = tarfile.TarInfo(name="foo/notsetup.py") - member.size = len(contents.getbuffer()) - tarobj.addfile(member, fileobj=contents) - - contents = io.BytesIO(b"nothing to see here") - member = tarfile.TarInfo(name="toplevelgetsskipped") - member.size = len(contents.getbuffer()) - tarobj.addfile(member, fileobj=contents) - tarbuf.seek(0) - - assert utils.extract_file_content(tarbuf, "setup.py") is None diff --git a/tests/unit/malware/test_checks.py b/tests/unit/malware/test_checks.py deleted file mode 100644 --- a/tests/unit/malware/test_checks.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import inspect - -import pretend -import pytest - -import warehouse.malware.checks as prod_checks - -from warehouse.malware.checks.base import MalwareCheckBase -from warehouse.malware.utils import get_check_fields - -from ...common import checks as test_checks -from ...common.db.packaging import FileFactory - - -def test_checks_subclass_base(): - prod_checks_from_module = inspect.getmembers(prod_checks, inspect.isclass) - test_checks_from_module = inspect.getmembers(test_checks, inspect.isclass) - all_checks = prod_checks_from_module + test_checks_from_module - - subclasses_of_malware_base = { - cls.__name__: cls for cls in MalwareCheckBase.__subclasses__() - } - - assert len(all_checks) == len(subclasses_of_malware_base) - - for check_name, check in all_checks: - assert subclasses_of_malware_base[check_name] == check - - [email protected](("checks"), [prod_checks, test_checks]) -def test_checks_fields(checks): - checks_from_module = inspect.getmembers(checks, inspect.isclass) - - for check_name, check in checks_from_module: - elems = inspect.getmembers(check, lambda a: not (inspect.isroutine(a))) - inspection_fields = {"name": check_name} - for elem_name, value in elems: - # Skip both dunder and "private" (_-prefixed) attributes - if not elem_name.startswith("_"): - inspection_fields[elem_name] = value - fields = get_check_fields(check) - - assert inspection_fields == fields - - -def test_base_prepare_file_hooked(db_session): - file = FileFactory.create() - request = pretend.stub( - db=db_session, route_url=pretend.call_recorder(lambda *a, **kw: "fake_url") - ) - - kwargs = test_checks.ExampleHookedCheck.prepare(request, file.id) - - assert request.route_url.calls == [pretend.call("packaging.file", path=file.path)] - assert "file_url" in kwargs - assert kwargs["file_url"] == "fake_url" - - -def test_base_prepare_nonfile_hooked(db_session): - file = FileFactory.create() - request = pretend.stub( - db=db_session, route_url=pretend.call_recorder(lambda *a, **kw: "fake_url") - ) - - class FakeProjectCheck(MalwareCheckBase): - hooked_object = "Project" - - kwargs = FakeProjectCheck.prepare(request, file.id) - - del FakeProjectCheck - gc.collect() - - assert request.route_url.calls == [] - assert "file_url" not in kwargs diff --git a/tests/unit/malware/test_init.py b/tests/unit/malware/test_init.py deleted file mode 100644 --- a/tests/unit/malware/test_init.py +++ /dev/null @@ -1,185 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from collections import defaultdict - -import pretend - -from celery.schedules import crontab - -from warehouse import malware -from warehouse.malware.interfaces import IMalwareCheckService -from warehouse.malware.tasks import run_scheduled_check - -from ...common import checks as test_checks -from ...common.db.accounts import UserFactory -from ...common.db.packaging import FileFactory, ProjectFactory, ReleaseFactory - - -def test_determine_malware_checks_no_checks(monkeypatch, db_request): - def get_enabled_hooked_checks(session): - return defaultdict(list) - - monkeypatch.setattr(malware, "get_enabled_hooked_checks", get_enabled_hooked_checks) - - project = ProjectFactory.create(name="foo") - release = ReleaseFactory.create(project=project) - file0 = FileFactory.create(release=release, filename="foo.bar") - - session = pretend.stub(info={}, new={file0, release, project}, dirty={}, deleted={}) - - malware.determine_malware_checks(pretend.stub(), session, pretend.stub()) - assert session.info["warehouse.malware.checks"] == set() - - -def test_determine_malware_checks_nothing_new(monkeypatch, db_request): - def get_enabled_hooked_checks(session): - result = defaultdict(list) - result["File"] = ["Check1", "Check2"] - result["Release"] = ["Check3"] - return result - - monkeypatch.setattr(malware, "get_enabled_hooked_checks", get_enabled_hooked_checks) - - project = ProjectFactory.create(name="foo") - release = ReleaseFactory.create(project=project) - file0 = FileFactory.create(release=release, filename="foo.bar") - - session = pretend.stub(info={}, new={}, dirty={file0, release}, deleted={}) - - malware.determine_malware_checks(pretend.stub(), session, pretend.stub()) - assert session.info.get("warehouse.malware.checks") is None - - -def test_determine_malware_checks_unsupported_object(monkeypatch, db_request): - def get_enabled_hooked_checks(session): - result = defaultdict(list) - result["File"] = ["Check1", "Check2"] - result["Release"] = ["Check3"] - return result - - monkeypatch.setattr(malware, "get_enabled_hooked_checks", get_enabled_hooked_checks) - - user = UserFactory.create() - - session = pretend.stub(info={}, new={user}, dirty={}, deleted={}) - - malware.determine_malware_checks(pretend.stub(), session, pretend.stub()) - assert session.info.get("warehouse.malware.checks") is None - - -def test_determine_malware_checks_file_only(monkeypatch, db_request): - def get_enabled_hooked_checks(session): - result = defaultdict(list) - result["File"] = ["Check1", "Check2"] - result["Release"] = ["Check3"] - return result - - monkeypatch.setattr(malware, "get_enabled_hooked_checks", get_enabled_hooked_checks) - - project = ProjectFactory.create(name="foo") - release = ReleaseFactory.create(project=project) - file0 = FileFactory.create(release=release, filename="foo.bar") - - session = pretend.stub(info={}, new={file0}, dirty={}, deleted={}) - - checks = {"Check%d:%s" % (x, file0.id) for x in range(1, 3)} - malware.determine_malware_checks(pretend.stub(), session, pretend.stub()) - assert session.info["warehouse.malware.checks"] == checks - - -def test_determine_malware_checks_file_and_release(monkeypatch, db_request): - def get_enabled_hooked_checks(session): - result = defaultdict(list) - result["File"] = ["Check1", "Check2"] - result["Release"] = ["Check3"] - return result - - monkeypatch.setattr(malware, "get_enabled_hooked_checks", get_enabled_hooked_checks) - - project = ProjectFactory.create(name="foo") - release = ReleaseFactory.create(project=project) - file0 = 
FileFactory.create(release=release, filename="foo.bar") - file1 = FileFactory.create(release=release, filename="foo.baz") - - session = pretend.stub( - info={}, new={project, release, file0, file1}, dirty={}, deleted={} - ) - - checks = {"Check%d:%s" % (x, file0.id) for x in range(1, 3)} - checks.update(["Check%d:%s" % (x, file1.id) for x in range(1, 3)]) - checks.add("Check3:%s" % release.id) - - malware.determine_malware_checks(pretend.stub(), session, pretend.stub()) - - assert session.info["warehouse.malware.checks"] == checks - - -def test_enqueue_malware_checks(app_config): - malware_check = pretend.stub( - run_checks=pretend.call_recorder(lambda malware_checks: None) - ) - factory = pretend.call_recorder(lambda ctx, config: malware_check) - app_config.register_service_factory(factory, IMalwareCheckService) - app_config.commit() - session = pretend.stub( - info={ - "warehouse.malware.checks": {"Check1:ba70267f-fabf-496f-9ac2-d237a983b187"} - } - ) - - malware.queue_malware_checks(app_config, session) - - assert factory.calls == [pretend.call(None, app_config)] - assert malware_check.run_checks.calls == [ - pretend.call({"Check1:ba70267f-fabf-496f-9ac2-d237a983b187"}) - ] - assert "warehouse.malware.checks" not in session.info - - -def test_enqueue_malware_checks_no_checks(app_config): - session = pretend.stub(info={}) - malware.queue_malware_checks(app_config, session) - assert "warehouse.malware.checks" not in session.info - - -def test_includeme(monkeypatch): - monkeypatch.setattr(malware, "checks", test_checks) - - malware_check_class = pretend.stub( - create_service=pretend.call_recorder(lambda *a, **kw: pretend.stub()) - ) - - config = pretend.stub( - maybe_dotted=lambda dotted: malware_check_class, - register_service_factory=pretend.call_recorder( - lambda factory, iface, name=None: None - ), - registry=pretend.stub( - settings={"malware_check.backend": "TestMalwareCheckService"} - ), - add_periodic_task=pretend.call_recorder(lambda *a, **kw: None), - ) - - malware.includeme(config) - - assert config.register_service_factory.calls == [ - pretend.call(malware_check_class.create_service, IMalwareCheckService) - ] - - assert config.add_periodic_task.calls == [ - pretend.call( - crontab(minute="0", hour="*/8"), - run_scheduled_check, - args=("ExampleScheduledCheck",), - ) - ] diff --git a/tests/unit/malware/test_models.py b/tests/unit/malware/test_models.py deleted file mode 100644 --- a/tests/unit/malware/test_models.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from warehouse.malware.models import VerdictClassification, VerdictConfidence - - -def test_classification_orderable(): - assert ( - VerdictClassification.Benign - < VerdictClassification.Indeterminate - < VerdictClassification.Threat - ) - assert ( - max( - [ - VerdictClassification.Benign, - VerdictClassification.Indeterminate, - VerdictClassification.Threat, - ] - ) - == VerdictClassification.Threat - ) - - -def test_confidence_orderable(): - assert VerdictConfidence.Low < VerdictConfidence.Medium < VerdictConfidence.High - assert ( - max([VerdictConfidence.Low, VerdictConfidence.Medium, VerdictConfidence.High]) - == VerdictConfidence.High - ) diff --git a/tests/unit/malware/test_services.py b/tests/unit/malware/test_services.py deleted file mode 100644 --- a/tests/unit/malware/test_services.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pretend -import pytest - -from zope.interface.verify import verifyClass - -from warehouse.malware.interfaces import IMalwareCheckService -from warehouse.malware.services import ( - DatabaseMalwareCheckService, - PrinterMalwareCheckService, -) -from warehouse.malware.tasks import run_check - - -class TestPrinterMalwareCheckService: - def test_verify_service(self): - assert verifyClass(IMalwareCheckService, PrinterMalwareCheckService) - - def test_create_service(self): - request = pretend.stub() - service = PrinterMalwareCheckService.create_service(None, request) - assert service.executor == print - - @pytest.mark.parametrize(("kwargs"), [{}, {"manually_triggered": True}]) - def test_run_checks(self, capfd, kwargs): - request = pretend.stub() - service = PrinterMalwareCheckService.create_service(None, request) - checks = ["one", "two", "three"] - service.run_checks(checks, **kwargs) - out, err = capfd.readouterr() - assert out == "".join([f"{check} {kwargs}\n" for check in checks]) - - -class TestDatabaseMalwareService: - def test_verify_service(self): - assert verifyClass(IMalwareCheckService, DatabaseMalwareCheckService) - - def test_create_service(self, db_request): - _delay = pretend.call_recorder(lambda *args: None) - db_request.task = lambda x: pretend.stub(delay=_delay) - service = DatabaseMalwareCheckService.create_service(None, db_request) - assert service.executor == db_request.task(run_check).delay - - def test_run_hooked_check(self, db_request): - _delay = pretend.call_recorder(lambda *args, **kwargs: None) - db_request.task = lambda x: pretend.stub(delay=_delay) - service = DatabaseMalwareCheckService.create_service(None, db_request) - checks = [ - "MyTestCheck:ba70267f-fabf-496f-9ac2-d237a983b187", - "AnotherCheck:44f57b0e-c5b0-47c5-8713-341cf392efe2", - "FinalCheck:e8518a15-8f01-430e-8f5b-87644007c9c0", - ] - service.run_checks(checks) - assert _delay.calls == [ - pretend.call("MyTestCheck", obj_id="ba70267f-fabf-496f-9ac2-d237a983b187"), - pretend.call("AnotherCheck", obj_id="44f57b0e-c5b0-47c5-8713-341cf392efe2"), - pretend.call("FinalCheck", obj_id="e8518a15-8f01-430e-8f5b-87644007c9c0"), - ] - 
- def test_run_scheduled_check(self, db_request): - _delay = pretend.call_recorder(lambda *args, **kwargs: None) - db_request.task = lambda x: pretend.stub(delay=_delay) - service = DatabaseMalwareCheckService.create_service(None, db_request) - checks = ["MyTestScheduledCheck"] - service.run_checks(checks) - assert _delay.calls == [pretend.call("MyTestScheduledCheck")] - - def test_run_triggered_check(self, db_request): - _delay = pretend.call_recorder(lambda *args, **kwargs: None) - db_request.task = lambda x: pretend.stub(delay=_delay) - service = DatabaseMalwareCheckService.create_service(None, db_request) - checks = ["MyTriggeredCheck"] - service.run_checks(checks, manually_triggered=True) - assert _delay.calls == [ - pretend.call("MyTriggeredCheck", manually_triggered=True) - ] diff --git a/tests/unit/malware/test_tasks.py b/tests/unit/malware/test_tasks.py deleted file mode 100644 --- a/tests/unit/malware/test_tasks.py +++ /dev/null @@ -1,490 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import celery -import pretend -import pytest - -from warehouse.malware import tasks -from warehouse.malware.models import MalwareCheck, MalwareCheckState, MalwareVerdict -from warehouse.malware.services import PrinterMalwareCheckService - -from ...common import checks as test_checks -from ...common.db.malware import MalwareCheckFactory, MalwareVerdictFactory -from ...common.db.packaging import FileFactory, ProjectFactory, ReleaseFactory - - -class TestRunCheck: - def test_success(self, db_request, monkeypatch): - db_request.route_url = pretend.call_recorder(lambda *a, **kw: "fake_route") - - monkeypatch.setattr(tasks, "checks", test_checks) - file0 = FileFactory.create() - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.Enabled - ) - task = pretend.stub() - tasks.run_check(task, db_request, "ExampleHookedCheck", obj_id=file0.id) - - assert db_request.route_url.calls == [ - pretend.call("packaging.file", path=file0.path) - ] - assert db_request.db.query(MalwareVerdict).one() - - @pytest.mark.parametrize(("manually_triggered"), [True, False]) - def test_evaluation_run(self, db_session, monkeypatch, manually_triggered): - monkeypatch.setattr(tasks, "checks", test_checks) - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.Evaluation - ) - ProjectFactory.create() - task = pretend.stub() - - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - - tasks.run_check( - task, - request, - "ExampleScheduledCheck", - manually_triggered=manually_triggered, - ) - - if manually_triggered: - assert db_session.query(MalwareVerdict).one() - else: - assert request.log.info.calls == [ - pretend.call( - "ExampleScheduledCheck is in the `evaluation` state and must be \ -manually triggered to run." 
- ) - ] - assert db_session.query(MalwareVerdict).all() == [] - - def test_disabled_check(self, db_session, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.Disabled - ) - task = pretend.stub() - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - - file = FileFactory.create() - - tasks.run_check(task, request, "ExampleHookedCheck", obj_id=file.id) - - assert request.log.info.calls == [ - pretend.call("Check ExampleHookedCheck isn't active. Aborting.") - ] - - def test_missing_check(self, db_request, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - task = pretend.stub() - - with pytest.raises(AttributeError): - tasks.run_check(task, db_request, "DoesNotExistCheck") - - def test_missing_obj_id(self, db_session, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - task = pretend.stub() - - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.Enabled - ) - task = pretend.stub() - - request = pretend.stub( - db=db_session, - log=pretend.stub(error=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - - tasks.run_check(task, request, "ExampleHookedCheck") - - assert request.log.error.calls == [ - pretend.call( - "Fatal exception: ExampleHookedCheck: Missing required kwarg `obj_id`" - ) - ] - - def test_retry(self, db_session, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - exc = Exception("Scan failed") - - def scan(self, **kwargs): - raise exc - - monkeypatch.setattr(tasks.checks.ExampleHookedCheck, "scan", scan) - - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.Enabled - ) - - task = pretend.stub( - retry=pretend.call_recorder(pretend.raiser(celery.exceptions.Retry)) - ) - request = pretend.stub( - db=db_session, - log=pretend.stub(error=pretend.call_recorder(lambda *args, **kwargs: None)), - route_url=pretend.call_recorder(lambda *a, **kw: pretend.stub()), - ) - - file = FileFactory.create() - - with pytest.raises(celery.exceptions.Retry): - tasks.run_check(task, request, "ExampleHookedCheck", obj_id=file.id) - - assert request.log.error.calls == [ - pretend.call("Error executing check ExampleHookedCheck: Scan failed") - ] - - assert task.retry.calls == [pretend.call(exc=exc)] - - -class TestRunScheduledCheck: - def test_invalid_check_name(self, db_request, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - task = pretend.stub() - with pytest.raises(AttributeError): - tasks.run_scheduled_check(task, db_request, "DoesNotExist") - - def test_run_check(self, db_session, capfd, monkeypatch): - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.Enabled - ) - - request = pretend.stub( - db=db_session, - find_service_factory=pretend.call_recorder( - lambda interface: PrinterMalwareCheckService.create_service - ), - ) - - task = pretend.stub() - - tasks.run_scheduled_check(task, request, "ExampleScheduledCheck") - - assert request.find_service_factory.calls == [ - pretend.call(tasks.IMalwareCheckService) - ] - - out, err = capfd.readouterr() - assert out == "ExampleScheduledCheck {'manually_triggered': False}\n" - - -class TestBackfill: - def test_invalid_check_name(self, db_request, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - task = pretend.stub() - with pytest.raises(AttributeError): - tasks.backfill(task, db_request, "DoesNotExist", 
1) - - @pytest.mark.parametrize( - ("num_objects", "num_runs"), [(11, 1), (11, 11), (101, 90)] - ) - def test_run(self, db_session, capfd, num_objects, num_runs, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - - ids = [] - for i in range(num_objects): - ids.append(FileFactory.create().id) - - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.Enabled - ) - - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - find_service_factory=pretend.call_recorder( - lambda interface: PrinterMalwareCheckService.create_service - ), - ) - - task = pretend.stub() - - tasks.backfill(task, request, "ExampleHookedCheck", num_runs) - - assert request.log.info.calls == [ - pretend.call("Running backfill on %d Files." % num_runs) - ] - - assert request.find_service_factory.calls == [ - pretend.call(tasks.IMalwareCheckService) - ] - - out, err = capfd.readouterr() - num_output_lines = 0 - for file_id in ids: - logged_output = "ExampleHookedCheck:{} {}\n".format( - file_id, - {"manually_triggered": True}, - ) - num_output_lines += 1 if logged_output in out else 0 - - assert num_output_lines == num_runs - - -class TestSyncChecks: - def test_no_updates(self, db_session, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - monkeypatch.setattr(tasks.checks.ExampleScheduledCheck, "version", 2) - - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.Disabled - ) - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.Disabled - ) - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.Enabled, version=2 - ) - - task = pretend.stub() - - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - - tasks.sync_checks(task, request) - - assert request.log.info.calls == [ - pretend.call("2 malware checks found in codebase."), - pretend.call("ExampleHookedCheck is unmodified."), - pretend.call("ExampleScheduledCheck is unmodified."), - ] - - @pytest.mark.parametrize( - ("final_state"), [MalwareCheckState.Enabled, MalwareCheckState.Disabled] - ) - def test_upgrade_check(self, monkeypatch, db_session, final_state): - monkeypatch.setattr(tasks, "checks", test_checks) - monkeypatch.setattr(tasks.checks.ExampleHookedCheck, "version", 2) - - MalwareCheckFactory.create(name="ExampleHookedCheck", state=final_state) - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.Disabled - ) - - task = pretend.stub() - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - - tasks.sync_checks(task, request) - - assert request.log.info.calls == [ - pretend.call("2 malware checks found in codebase."), - pretend.call("Updating existing ExampleHookedCheck."), - pretend.call("ExampleScheduledCheck is unmodified."), - ] - db_checks = ( - db_session.query(MalwareCheck) - .filter(MalwareCheck.name == "ExampleHookedCheck") - .all() - ) - - assert len(db_checks) == 2 - - if final_state == MalwareCheckState.Disabled: - assert ( - db_checks[0].state == db_checks[1].state == MalwareCheckState.Disabled - ) - - else: - for c in db_checks: - if c.state == final_state: - assert c.version == 2 - else: - assert c.version == 1 - - def test_one_new_check(self, db_session, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - - MalwareCheckFactory.create( - 
name="ExampleHookedCheck", state=MalwareCheckState.Disabled - ) - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.Disabled - ) - - task = pretend.stub() - - class FakeMalwareCheck: - version = 1 - short_description = "This is a short description." - long_description = "This is a longer description." - check_type = "scheduled" - schedule = {"minute": "0", "hour": "*/8"} - - tasks.checks.FakeMalwareCheck = FakeMalwareCheck - - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - - tasks.sync_checks(task, request) - - assert request.log.info.calls == [ - pretend.call("3 malware checks found in codebase."), - pretend.call("ExampleHookedCheck is unmodified."), - pretend.call("ExampleScheduledCheck is unmodified."), - pretend.call("Adding new FakeMalwareCheck to the database."), - ] - assert db_session.query(MalwareCheck).count() == 3 - - new_check = ( - db_session.query(MalwareCheck) - .filter(MalwareCheck.name == "FakeMalwareCheck") - .one() - ) - - assert new_check.state == MalwareCheckState.Disabled - - del tasks.checks.FakeMalwareCheck - - def test_too_many_db_checks(self, db_session, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.Enabled - ) - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.Enabled - ) - MalwareCheckFactory.create( - name="AnotherCheck", state=MalwareCheckState.Evaluation, version=2 - ) - - task = pretend.stub() - - request = pretend.stub( - db=db_session, - log=pretend.stub( - info=pretend.call_recorder(lambda *args, **kwargs: None), - error=pretend.call_recorder(lambda *args, **kwargs: None), - ), - ) - - with pytest.raises(Exception): - tasks.sync_checks(task, request) - - assert request.log.info.calls == [ - pretend.call("2 malware checks found in codebase.") - ] - - assert request.log.error.calls == [ - pretend.call( - "Found 3 active checks in the db, but only 2 checks in code. Please \ -manually move superfluous checks to the wiped_out state in the check admin: \ -AnotherCheck" - ) - ] - - def test_only_wiped_out(self, db_session, monkeypatch): - monkeypatch.setattr(tasks, "checks", test_checks) - MalwareCheckFactory.create( - name="ExampleHookedCheck", state=MalwareCheckState.WipedOut - ) - MalwareCheckFactory.create( - name="ExampleScheduledCheck", state=MalwareCheckState.WipedOut - ) - - task = pretend.stub() - request = pretend.stub( - db=db_session, - log=pretend.stub( - info=pretend.call_recorder(lambda *args, **kwargs: None), - error=pretend.call_recorder(lambda *args, **kwargs: None), - ), - ) - - tasks.sync_checks(task, request) - - assert request.log.info.calls == [ - pretend.call("2 malware checks found in codebase.") - ] - - assert request.log.error.calls == [ - pretend.call( - "ExampleHookedCheck is wiped_out and cannot be synced. " - "Please remove check from codebase." - ), - pretend.call( - "ExampleScheduledCheck is wiped_out and cannot be synced. " - "Please remove check from codebase." 
- ), - ] - - -class TestRemoveVerdicts: - def test_no_verdicts(self, db_session): - check = MalwareCheckFactory.create() - - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - task = pretend.stub() - removed = tasks.remove_verdicts(task, request, check.name) - - assert request.log.info.calls == [ - pretend.call( - "Removing 0 malware verdicts associated with %s version 1." % check.name - ) - ] - assert removed == 0 - - @pytest.mark.parametrize(("check_with_verdicts"), [True, False]) - def test_many_verdicts(self, db_session, check_with_verdicts): - check0 = MalwareCheckFactory.create() - check1 = MalwareCheckFactory.create() - project = ProjectFactory.create(name="foo") - release = ReleaseFactory.create(project=project) - file0 = FileFactory.create(release=release, filename="foo.bar") - num_verdicts = 10 - - for i in range(num_verdicts): - MalwareVerdictFactory.create(check=check1, release_file=file0) - - assert db_session.query(MalwareVerdict).count() == num_verdicts - - request = pretend.stub( - db=db_session, - log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)), - ) - - task = pretend.stub() - - if check_with_verdicts: - wiped_out_check = check1 - else: - wiped_out_check = check0 - num_verdicts = 0 - - removed = tasks.remove_verdicts(task, request, wiped_out_check.name) - - assert request.log.info.calls == [ - pretend.call( - "Removing %d malware verdicts associated with %s version 1." - % (num_verdicts, wiped_out_check.name) - ) - ] - - assert removed == num_verdicts diff --git a/tests/unit/malware/test_utils.py b/tests/unit/malware/test_utils.py deleted file mode 100644 --- a/tests/unit/malware/test_utils.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pytest - -from warehouse.malware.models import ( - MalwareCheckObjectType, - MalwareCheckState, - MalwareCheckType, -) -from warehouse.malware.utils import get_check_fields, get_enabled_hooked_checks - -from ...common.checks import ExampleHookedCheck, ExampleScheduledCheck -from ...common.db.malware import MalwareCheckFactory - - -class TestGetEnabledChecks: - def test_one(self, db_session): - check = MalwareCheckFactory.create( - state=MalwareCheckState.Enabled, check_type=MalwareCheckType.EventHook - ) - assert get_enabled_hooked_checks(db_session) == { - check.hooked_object.value: {check.name} - } - - def test_many(self, db_session): - check = MalwareCheckFactory.create( - state=MalwareCheckState.Enabled, - check_type=MalwareCheckType.EventHook, - hooked_object=MalwareCheckObjectType.Project, - ) - other_check = MalwareCheckFactory.create( - state=MalwareCheckState.Enabled, - check_type=MalwareCheckType.EventHook, - hooked_object=MalwareCheckObjectType.Project, - ) - assert get_enabled_hooked_checks(db_session) == { - check.hooked_object.value: {check.name, other_check.name} - } - - def test_none(self, db_session): - assert get_enabled_hooked_checks(db_session) == {} - - -class TestGetCheckFields: - @pytest.mark.parametrize( - ("check", "result"), - [ - ( - ExampleHookedCheck, - { - "name": "ExampleHookedCheck", - "version": 1, - "short_description": "An example hook-based check", - "long_description": "The purpose of this check is to test the \ -implementation of a hook-based check. This check will generate verdicts if enabled.", - "check_type": "event_hook", - "hooked_object": "File", - }, - ), - ( - ExampleScheduledCheck, - { - "name": "ExampleScheduledCheck", - "version": 1, - "short_description": "An example scheduled check", - "long_description": "The purpose of this check is to test the \ -implementation of a scheduled check. This check will generate verdicts if enabled.", - "check_type": "scheduled", - "schedule": {"minute": "0", "hour": "*/8"}, - }, - ), - ], - ) - def test_success(self, check, result): - assert get_check_fields(check) == result - - def test_failure(self, monkeypatch): - monkeypatch.delattr(ExampleScheduledCheck, "schedule") - - with pytest.raises(AttributeError): - get_check_fields(ExampleScheduledCheck) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -359,7 +359,6 @@ def __init__(self): pretend.call(".accounts"), pretend.call(".macaroons"), pretend.call(".oidc"), - pretend.call(".malware"), pretend.call(".manage"), pretend.call(".organizations"), pretend.call(".subscriptions"), diff --git a/tests/unit/test_tasks.py b/tests/unit/test_tasks.py --- a/tests/unit/test_tasks.py +++ b/tests/unit/test_tasks.py @@ -529,11 +529,8 @@ def test_includeme(env, ssl, broker_url, expected_url, transport_options): "task_serializer": "json", "accept_content": ["json", "msgpack"], "task_queue_ha_policy": "all", - "task_queues": ( - Queue("default", routing_key="task.#"), - Queue("malware", routing_key="malware.#"), - ), - "task_routes": {"warehouse.malware.tasks.*": {"queue": "malware"}}, + "task_queues": (Queue("default", routing_key="task.#"),), + "task_routes": {}, "REDBEAT_REDIS_URL": (config.registry.settings["celery.scheduler_url"]), }.items(): assert app.conf[key] == value
Repurpose 'malware' checks / YARA scanning for secret scanning

**What's the problem this feature will solve?**

While our existing 'malware' checks are not being used due to noise and false positives, the mechanics available via the checks are perfect for token/credential scanning.

**Describe the solution you'd like**

We should be able to write a class of checks that (as sketched below):

- scans every file in a distribution for a given pattern
- calls an internal/external webhook with the result, in some specified format

Once this is implemented, we should support scanning for specific tokens:

- [ ] `pypi-` and `testpypi-` prefixed tokens, which should automatically be revoked when found
- [ ] 'reciprocal' scanning for GitHub tokens: https://docs.github.com/en/code-security/secret-scanning/about-secret-scanning
- [ ] other platforms/ecosystems?

**Additional context**

Some other potential ecosystems that might want to integrate: https://docs.github.com/en/code-security/secret-scanning/secret-scanning-patterns
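To make the shape of such a check concrete, here is a minimal sketch in the spirit of the existing hook-based checks. Everything in it is an illustrative assumption rather than Warehouse's actual API: the `TOKEN_RE` pattern, the `WEBHOOK_URL` endpoint, and the helper names are invented for the example, and a real check would plug into the check machinery and the token-revocation flow instead of calling `requests` directly.

```python
# Hypothetical sketch only: scan an sdist for pypi-/testpypi- prefixed
# tokens and report findings to a webhook. The names, the token regex,
# and the endpoint are assumptions, not Warehouse's real interfaces.
import io
import re
import tarfile

import requests

# Assumed shape of a PyPI API token; the real macaroon format differs.
TOKEN_RE = re.compile(rb"\b(?:pypi|testpypi)-[A-Za-z0-9_-]{16,}")
WEBHOOK_URL = "https://example.invalid/secret-report"  # assumed endpoint


def scan_sdist(buf: io.BytesIO) -> list[bytes]:
    """Return token-like byte strings found in a .tar.gz distribution."""
    matches: list[bytes] = []
    with tarfile.open(fileobj=buf, mode="r:*") as tar:
        for member in tar.getmembers():
            if not member.isfile():
                continue
            extracted = tar.extractfile(member)
            if extracted is not None:
                matches.extend(TOKEN_RE.findall(extracted.read()))
    return matches


def report_findings(file_url: str, matches: list[bytes]) -> None:
    """POST each finding to the (assumed) internal/external webhook."""
    for token in matches:
        requests.post(
            WEBHOOK_URL,
            json={"source": file_url, "token": token.decode()},
            timeout=10,
        )
```

Under this split, revocation of `pypi-` tokens would hang off the webhook handler rather than the scan itself, keeping the scanning path read-only.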
Related: https://tomforb.es/i-scanned-every-package-on-pypi-and-found-57-live-aws-keys/

I think there is limited value in rolling our own secret scanning solution for PyPI. We could of course scan for specific patterns we derive ourselves (like I did for AWS keys) and work out how to validate each one, but this is really re-implementing the GitHub secret scanning infrastructure, which already has a _lot_ of partners: https://docs.github.com/en/code-security/secret-scanning/secret-scanning-patterns

I've been toying with a stupid-sounding idea that I can't seem to let go of. Why don't we extract all text files from each uploaded release and commit them to a unique orphaned branch in a GitHub repository? We could expire the branches after a period of time to keep the repo size down, and I expect the total size of all unique text content on PyPI, compressed, is _significantly_ lower than the 12 TB total storage size. This would trigger a scan against all supported providers, without us needing to really do much.

An interesting idea, but IMO that sort of feels like an abuse of GitHub, or at least not a way in which GitHub is intended to be used, and might be confusing to our users and the secret scanning partners:

- While PyPI's [terms of use](https://pypi.org/policy/terms-of-use/) permit redistribution on PyPI and "any mirroring facility", I think our users would be a bit surprised to find that things they publish to PyPI are also getting published to GitHub (even if it's eventually removed).
- I'm not sure if GitHub notifies the user when they find a secret, but I would imagine that they do, and that we would in turn want to notify our users as well. Trying to generate this notification from GitHub's notification seems challenging; it would be much easier for us to generate it if we're scanning for secrets 'in-house'.
- When GitHub notifies on a found secret, they indicate the source to the secret provider. In this instance, the report they would be giving to the partner would indicate that the source is our 'mirror' GitHub repository, not PyPI itself.
- While many of GitHub's partners have open & public means to integrate with their secret-reporting APIs, not all do (for example, Google Cloud requires specific onboarding steps). While it would be nice to immediately support all the partners GitHub supports, I think those partners might be surprised to be getting reports from PyPI.
- Finally, round-tripping to GitHub just to revoke our _own_ API tokens seems unnecessarily circuitous. These revocations should happen as quickly as possible.

I want to reiterate that the scanning infrastructure already exists on PyPI; we just need to write some YARA patterns for specific partners we want to support, and add functionality for hitting a webhook with the result to notify.

These are all valid points, however I'm assuming in all of this that we're talking about scanning more than just PyPI tokens. Those can (and should) be done in-house where convenient. My argument is that while it's simple to scan for and revoke PyPI tokens, the moment we want "reciprocal scanning of GitHub tokens" or "other platforms/ecosystems", things seem to get a lot more complex. And I'm not sure of the value of *just* scanning for PyPI tokens. GitHub does seem to care about these kinds of issues, and my overall point is that working with them might be better than working alone.
That might come in different forms, from hacking an MVP together with git to seeing if there's a chance an external secret scanning API for PyPI and other registries might be put on their roadmap.

I've started work on committing the text content of every file uploaded to PyPI in November 2022, with the aim of seeing how many secrets GitHub would detect. It's being added in chunks here: https://github.com/orf/pypi-import

I think we (or maybe just me?) have massively underestimated the number and variety of credentials that have been published to PyPI. It has only completed about 15% of November, but it has already found hundreds of keys from many different services; nine distinct PyPI API tokens so far. Spot-checking them reveals even more, including just [straight up, valid, raw database credentials](https://github.com/orf/pypi-import/blob/2d421ba607f8fe5146dc5f3cdfda26f25a2110de/output/test_sf_etl_py3-0.0.2.tar.gz#L392).
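For what it's worth, the orphan-branch experiment described above can be approximated in a few lines. This is a rough sketch under stated assumptions (the clone location, the `scan/<release_id>` branch naming, and the null-byte "is it text?" heuristic are all invented here), not how pypi-import actually works:

```python
# Hypothetical sketch: copy a release's text files into a local clone and
# push them on a throwaway orphan branch so GitHub's own secret scanning
# runs over them. Repo layout and branch naming are assumptions.
import shutil
import subprocess
from pathlib import Path


def push_release_for_scanning(repo: Path, release_dir: Path, release_id: str) -> None:
    def git(*args: str) -> None:
        subprocess.run(["git", *args], cwd=repo, check=True)

    branch = f"scan/{release_id}"
    git("checkout", "--orphan", branch)
    git("rm", "-rf", "--ignore-unmatch", ".")  # start the branch empty
    for src in release_dir.rglob("*"):
        if not src.is_file():
            continue
        with src.open("rb") as f:
            head = f.read(8192)
        if b"\0" in head:
            # Skip binaries: they bloat the repo without improving coverage.
            continue
        dest = repo / src.relative_to(release_dir)
        dest.parent.mkdir(parents=True, exist_ok=True)
        shutil.copyfile(src, dest)
    git("add", "--all")
    git("commit", "--allow-empty", "-m", f"scan payload for {release_id}")
    # Deleting the branch later (e.g. on a schedule) "expires" the content.
    git("push", "origin", branch)
```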
2023-05-11T12:18:28Z
[]
[]
pypi/warehouse
13651
pypi__warehouse-13651
[ "13650" ]
42b951a99e22b670570f92ad88d8eef966491211
diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -62,10 +62,7 @@ Project, Release, ) -from warehouse.packaging.tasks import ( - sync_file_to_archive, - update_bigquery_release_files, -) +from warehouse.packaging.tasks import sync_file_to_cache, update_bigquery_release_files from warehouse.rate_limiting.interfaces import RateLimiterException from warehouse.utils import http, readme from warehouse.utils.project import PROJECT_NAME_RE, validate_project_name @@ -1453,7 +1450,7 @@ def file_upload(request): # this won't take affect until after a commit has happened, for # now we'll just ignore it and save it before the transaction is # committed. - storage = request.find_service(IFileStorage, name="primary") + storage = request.find_service(IFileStorage, name="archive") storage.store( file_.path, os.path.join(tmpdir, filename), @@ -1538,8 +1535,8 @@ def file_upload(request): # Log a successful upload metrics.increment("warehouse.upload.ok", tags=[f"filetype:{form.filetype.data}"]) - # Dispatch our archive task to sync this as soon as possible - request.task(sync_file_to_archive).delay(file_.id) + # Dispatch our task to sync this to cache as soon as possible + request.task(sync_file_to_cache).delay(file_.id) return Response() diff --git a/warehouse/migrations/versions/c5f718cb98ac_add_cached_bool_on_files_table.py b/warehouse/migrations/versions/c5f718cb98ac_add_cached_bool_on_files_table.py new file mode 100644 --- /dev/null +++ b/warehouse/migrations/versions/c5f718cb98ac_add_cached_bool_on_files_table.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +add cached bool on files table + +Revision ID: c5f718cb98ac +Revises: 6073f65a2767 +Create Date: 2023-05-12 08:00:47.726442 +""" + +import sqlalchemy as sa + +from alembic import op + +revision = "c5f718cb98ac" +down_revision = "6073f65a2767" + + +def upgrade(): + op.add_column( + "release_files", + sa.Column( + "cached", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + comment="If True, the object has been populated to our cache bucket.", + ), + ) + # CREATE INDEX CONCURRENTLY cannot happen inside a transaction. We'll close + # our transaction here and issue the statement. 
+ op.execute("COMMIT") + op.create_index( + "release_files_cached_idx", + "release_files", + ["cached"], + unique=False, + postgresql_concurrently=True, + ) + + +def downgrade(): + op.drop_index("release_files_cached_idx", table_name="release_files") + op.drop_column("release_files", "cached") diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py --- a/warehouse/packaging/__init__.py +++ b/warehouse/packaging/__init__.py @@ -27,7 +27,7 @@ from warehouse.packaging.models import File, Project, Release, Role from warehouse.packaging.services import project_service_factory from warehouse.packaging.tasks import ( - check_file_archive_tasks_outstanding, + check_file_cache_tasks_outstanding, compute_2fa_mandate, compute_2fa_metrics, update_description_html, @@ -64,7 +64,7 @@ def includeme(config): # our package files. files_storage_class = config.maybe_dotted(config.registry.settings["files.backend"]) config.register_service_factory( - files_storage_class.create_service, IFileStorage, name="primary" + files_storage_class.create_service, IFileStorage, name="cache" ) archive_files_storage_class = config.maybe_dotted( @@ -179,9 +179,7 @@ def includeme(config): ], ) - config.add_periodic_task( - crontab(minute="*/1"), check_file_archive_tasks_outstanding - ) + config.add_periodic_task(crontab(minute="*/1"), check_file_cache_tasks_outstanding) config.add_periodic_task(crontab(minute="*/5"), update_description_html) config.add_periodic_task(crontab(minute="*/5"), update_role_invitation_status) diff --git a/warehouse/packaging/models.py b/warehouse/packaging/models.py --- a/warehouse/packaging/models.py +++ b/warehouse/packaging/models.py @@ -638,6 +638,7 @@ def __table_args__(cls): # noqa ), Index("release_files_release_id_idx", "release_id"), Index("release_files_archived_idx", "archived"), + Index("release_files_cached_idx", "cached"), ) release_id = Column( @@ -678,6 +679,12 @@ def __table_args__(cls): # noqa # of all of them and then remove this column. 
allow_multiple_sdist = Column(Boolean, nullable=False, server_default=sql.false()) + cached = Column( + Boolean, + comment="If True, the object has been populated to our cache bucket.", + nullable=False, + server_default=sql.false(), + ) archived = Column( Boolean, comment="If True, the object has been archived to our archival bucket.", diff --git a/warehouse/packaging/tasks.py b/warehouse/packaging/tasks.py --- a/warehouse/packaging/tasks.py +++ b/warehouse/packaging/tasks.py @@ -29,42 +29,40 @@ from warehouse.utils import readme -def _copy_file_to_archive(primary_storage, archive_storage, path): - metadata = primary_storage.get_metadata(path) - file_obj = primary_storage.get(path) - with tempfile.NamedTemporaryFile() as file_for_archive: - file_for_archive.write(file_obj.read()) - file_for_archive.flush() - archive_storage.store(path, file_for_archive.name, meta=metadata) +def _copy_file_to_cache(archive_storage, cache_storage, path): + metadata = archive_storage.get_metadata(path) + file_obj = archive_storage.get(path) + with tempfile.NamedTemporaryFile() as file_for_cache: + file_for_cache.write(file_obj.read()) + file_for_cache.flush() + cache_storage.store(path, file_for_cache.name, meta=metadata) @tasks.task(ignore_result=True, acks_late=True) -def sync_file_to_archive(request, file_id): +def sync_file_to_cache(request, file_id): file = request.db.get(File, file_id) - if not file.archived: - primary_storage = request.find_service(IFileStorage, name="primary") + if not file.cached: archive_storage = request.find_service(IFileStorage, name="archive") + cache_storage = request.find_service(IFileStorage, name="cache") - _copy_file_to_archive(primary_storage, archive_storage, file.path) + _copy_file_to_cache(archive_storage, cache_storage, file.path) if file.metadata_file_sha256_digest is not None: - _copy_file_to_archive(primary_storage, archive_storage, file.metadata_path) + _copy_file_to_cache(archive_storage, cache_storage, file.metadata_path) if file.has_signature: - _copy_file_to_archive(primary_storage, archive_storage, file.pgp_path) + _copy_file_to_cache(archive_storage, cache_storage, file.pgp_path) - file.archived = True + file.cached = True @tasks.task(ignore_result=True, acks_late=True) -def check_file_archive_tasks_outstanding(request): +def check_file_cache_tasks_outstanding(request): metrics = request.find_service(IMetricsService, context=None) - files_not_archived = ( - request.db.query(File).filter(File.archived == False).count() # noqa: E712 - ) + files_not_cached = request.db.query(File).filter_by(cached=False).count() metrics.gauge( - "warehouse.packaging.files.not_archived", - files_not_archived, + "warehouse.packaging.files.not_cached", + files_not_cached, )
diff --git a/tests/unit/forklift/test_legacy.py b/tests/unit/forklift/test_legacy.py --- a/tests/unit/forklift/test_legacy.py +++ b/tests/unit/forklift/test_legacy.py @@ -48,10 +48,7 @@ Release, Role, ) -from warehouse.packaging.tasks import ( - sync_file_to_archive, - update_bigquery_release_files, -) +from warehouse.packaging.tasks import sync_file_to_cache, update_bigquery_release_files from warehouse.utils.security_policy import AuthenticationMethod from ...common.db.accounts import EmailFactory, UserFactory @@ -1447,7 +1444,7 @@ def storage_service_store(path, file_path, *, meta): assert resp.status_code == 200 assert db_request.find_service.calls == [ pretend.call(IMetricsService, context=None), - pretend.call(IFileStorage, name="primary"), + pretend.call(IFileStorage, name="archive"), ] assert len(storage_service.store.calls) == 2 if has_signature else 1 assert storage_service.store.calls[0] == pretend.call( @@ -1521,7 +1518,7 @@ def storage_service_store(path, file_path, *, meta): assert db_request.task.calls == [ pretend.call(update_bigquery_release_files), - pretend.call(sync_file_to_archive), + pretend.call(sync_file_to_cache), ] assert metrics.increment.calls == [ @@ -2824,7 +2821,7 @@ def storage_service_store(path, file_path, *, meta): assert resp.status_code == 200 assert db_request.find_service.calls == [ pretend.call(IMetricsService, context=None), - pretend.call(IFileStorage, name="primary"), + pretend.call(IFileStorage, name="archive"), ] assert storage_service.store.calls == [ pretend.call( @@ -2958,7 +2955,7 @@ def storage_service_store(path, file_path, *, meta): assert resp.status_code == 200 assert db_request.find_service.calls == [ pretend.call(IMetricsService, context=None), - pretend.call(IFileStorage, name="primary"), + pretend.call(IFileStorage, name="archive"), ] assert storage_service.store.calls == [ pretend.call( diff --git a/tests/unit/packaging/test_init.py b/tests/unit/packaging/test_init.py --- a/tests/unit/packaging/test_init.py +++ b/tests/unit/packaging/test_init.py @@ -28,7 +28,7 @@ from warehouse.packaging.models import File, Project, Release, Role from warehouse.packaging.services import project_service_factory from warehouse.packaging.tasks import ( # sync_bigquery_release_files, - check_file_archive_tasks_outstanding, + check_file_cache_tasks_outstanding, compute_2fa_mandate, update_description_html, ) @@ -75,7 +75,7 @@ def key_factory(keystring, iterate_on=None, if_attr_exists=None): packaging.includeme(config) assert config.register_service_factory.calls == [ - pretend.call(storage_class.create_service, IFileStorage, name="primary"), + pretend.call(storage_class.create_service, IFileStorage, name="cache"), pretend.call(storage_class.create_service, IFileStorage, name="archive"), pretend.call(storage_class.create_service, ISimpleStorage), pretend.call(storage_class.create_service, IDocsStorage), @@ -179,7 +179,7 @@ def key_factory(keystring, iterate_on=None, if_attr_exists=None): ) assert ( - pretend.call(crontab(minute="*/1"), check_file_archive_tasks_outstanding) + pretend.call(crontab(minute="*/1"), check_file_cache_tasks_outstanding) in config.add_periodic_task.calls ) assert ( diff --git a/tests/unit/packaging/test_tasks.py b/tests/unit/packaging/test_tasks.py --- a/tests/unit/packaging/test_tasks.py +++ b/tests/unit/packaging/test_tasks.py @@ -26,11 +26,11 @@ from warehouse.accounts.models import WebAuthn from warehouse.packaging.models import Description from warehouse.packaging.tasks import ( - check_file_archive_tasks_outstanding, + 
check_file_cache_tasks_outstanding, compute_2fa_mandate, compute_2fa_metrics, sync_bigquery_release_files, - sync_file_to_archive, + sync_file_to_cache, update_bigquery_release_files, update_description_html, ) @@ -48,22 +48,20 @@ ) [email protected]("archived", [True, False]) -def test_sync_file_to_archive(db_request, monkeypatch, archived): - file = FileFactory(archived=archived) - primary_stub = pretend.stub( [email protected]("cached", [True, False]) +def test_sync_file_to_cache(db_request, monkeypatch, cached): + file = FileFactory(cached=cached) + archive_stub = pretend.stub( get_metadata=pretend.call_recorder(lambda path: {"fizz": "buzz"}), get=pretend.call_recorder( lambda path: pretend.stub(read=lambda: b"my content") ), ) - archive_stub = pretend.stub( + cache_stub = pretend.stub( store=pretend.call_recorder(lambda filename, path, meta=None: None) ) db_request.find_service = pretend.call_recorder( - lambda iface, name=None: {"primary": primary_stub, "archive": archive_stub}[ - name - ] + lambda iface, name=None: {"cache": cache_stub, "archive": archive_stub}[name] ) @contextmanager @@ -76,42 +74,40 @@ def mock_named_temporary_file(): monkeypatch.setattr(tempfile, "NamedTemporaryFile", mock_named_temporary_file) - sync_file_to_archive(db_request, file.id) + sync_file_to_cache(db_request, file.id) - assert file.archived + assert file.cached - if not archived: - assert primary_stub.get_metadata.calls == [pretend.call(file.path)] - assert primary_stub.get.calls == [pretend.call(file.path)] - assert archive_stub.store.calls == [ + if not cached: + assert archive_stub.get_metadata.calls == [pretend.call(file.path)] + assert archive_stub.get.calls == [pretend.call(file.path)] + assert cache_stub.store.calls == [ pretend.call(file.path, "/tmp/wutang", meta={"fizz": "buzz"}), ] else: - assert primary_stub.get_metadata.calls == [] - assert primary_stub.get.calls == [] - assert archive_stub.store.calls == [] + assert archive_stub.get_metadata.calls == [] + assert archive_stub.get.calls == [] + assert cache_stub.store.calls == [] [email protected]("archived", [True, False]) -def test_sync_file_to_archive_includes_bonus_files(db_request, monkeypatch, archived): [email protected]("cached", [True, False]) +def test_sync_file_to_cache_includes_bonus_files(db_request, monkeypatch, cached): file = FileFactory( - archived=archived, + cached=cached, has_signature=True, metadata_file_sha256_digest="deadbeefdeadbeefdeadbeefdeadbeef", ) - primary_stub = pretend.stub( + archive_stub = pretend.stub( get_metadata=pretend.call_recorder(lambda path: {"fizz": "buzz"}), get=pretend.call_recorder( lambda path: pretend.stub(read=lambda: b"my content") ), ) - archive_stub = pretend.stub( + cache_stub = pretend.stub( store=pretend.call_recorder(lambda filename, path, meta=None: None) ) db_request.find_service = pretend.call_recorder( - lambda iface, name=None: {"primary": primary_stub, "archive": archive_stub}[ - name - ] + lambda iface, name=None: {"cache": cache_stub, "archive": archive_stub}[name] ) @contextmanager @@ -124,40 +120,40 @@ def mock_named_temporary_file(): monkeypatch.setattr(tempfile, "NamedTemporaryFile", mock_named_temporary_file) - sync_file_to_archive(db_request, file.id) + sync_file_to_cache(db_request, file.id) - assert file.archived + assert file.cached - if not archived: - assert primary_stub.get_metadata.calls == [ + if not cached: + assert archive_stub.get_metadata.calls == [ pretend.call(file.path), pretend.call(file.metadata_path), pretend.call(file.pgp_path), ] - assert 
primary_stub.get.calls == [ + assert archive_stub.get.calls == [ pretend.call(file.path), pretend.call(file.metadata_path), pretend.call(file.pgp_path), ] - assert archive_stub.store.calls == [ + assert cache_stub.store.calls == [ pretend.call(file.path, "/tmp/wutang", meta={"fizz": "buzz"}), pretend.call(file.metadata_path, "/tmp/wutang", meta={"fizz": "buzz"}), pretend.call(file.pgp_path, "/tmp/wutang", meta={"fizz": "buzz"}), ] else: - assert primary_stub.get_metadata.calls == [] - assert primary_stub.get.calls == [] - assert archive_stub.store.calls == [] + assert archive_stub.get_metadata.calls == [] + assert archive_stub.get.calls == [] + assert cache_stub.store.calls == [] -def test_check_file_archive_tasks_outstanding(db_request, metrics): - [FileFactory(archived=True) for _ in range(12)] - [FileFactory(archived=False) for _ in range(3)] +def test_check_file_cache_tasks_outstanding(db_request, metrics): + [FileFactory(cached=True) for _ in range(12)] + [FileFactory(cached=False) for _ in range(3)] - check_file_archive_tasks_outstanding(db_request) + check_file_cache_tasks_outstanding(db_request) assert metrics.gauge.calls == [ - pretend.call("warehouse.packaging.files.not_archived", 3) + pretend.call("warehouse.packaging.files.not_cached", 3) ]
Convert B2 to be a service cache tier

We've experienced multiple business-day outages for operations that leverage file uploads to B2 Cloud, notably `forklift.legacy.file_upload`. The Backblaze [Status Page indicates this is a weekly scheduled maintenance window](https://status.backblaze.com/#:~:text=Scheduled%20Maintenance), and in chatting with one of their support reps, even outside that window their recommendation is to "try again, increase timeouts, or try again at the end of day".

We currently use B2 as the `files.backend`-configured `IFileStorage` "primary" service, so we should consider what that means in the context of uploads. One thought is to accept uploads to S3, and then kick off a task to replicate to B2 for cached serving. In the event that the task fails, apply some retry logic (exponential backoff for N hours?). In the interim, serve the file from S3 (which I think is already the case).
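For illustration only, here is a minimal sketch of the replicate-with-retry idea floated in the issue, assuming duck-typed `archive_store`/`cache_store` objects as stand-ins for the `IFileStorage` services. Note that the merged patch above takes a different route: it marks files with a `cached` flag and relies on Celery `acks_late` semantics plus a periodic metrics task rather than explicit inline backoff.

```python
import tempfile
import time


def replicate_to_cache(archive_store, cache_store, path, max_attempts=5):
    """Copy one object from the durable tier to the cache tier,
    retrying transient failures with exponential backoff."""
    for attempt in range(max_attempts):
        try:
            # Pull the object and its metadata from the durable tier.
            metadata = archive_store.get_metadata(path)
            file_obj = archive_store.get(path)
            with tempfile.NamedTemporaryFile() as tmp:
                tmp.write(file_obj.read())
                tmp.flush()
                # Push it into the cache tier (e.g. B2) for serving.
                cache_store.store(path, tmp.name, meta=metadata)
            return
        except Exception:
            if attempt == max_attempts - 1:
                raise
            time.sleep(2**attempt)  # 1s, 2s, 4s, ...
```

In production one would likely bound the total retry window (the issue suggests N hours) and emit failure metrics rather than sleeping inline in a worker.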
2023-05-12T08:15:13Z
[]
[]
pypi/warehouse
13,667
pypi__warehouse-13667
[ "13577" ]
3407b021747cb4fed54d3643d4f9ce9973efa1f5
diff --git a/warehouse/email/__init__.py b/warehouse/email/__init__.py --- a/warehouse/email/__init__.py +++ b/warehouse/email/__init__.py @@ -987,7 +987,9 @@ def send_trusted_publisher_added_email(request, user, project_name, publisher): "username": request.user.username, "project_name": project_name, "publisher_name": publisher.publisher_name, - "publisher_spec": str(publisher), + "publisher_workflow": str(publisher), + "publisher_repository": publisher.repository_name, + "publisher_environment": publisher.environment, } @@ -998,7 +1000,9 @@ def send_trusted_publisher_removed_email(request, user, project_name, publisher) "username": request.user.username, "project_name": project_name, "publisher_name": publisher.publisher_name, - "publisher_spec": str(publisher), + "publisher_workflow": str(publisher), + "publisher_repository": publisher.repository_name, + "publisher_environment": publisher.environment, }
diff --git a/tests/unit/email/test_init.py b/tests/unit/email/test_init.py --- a/tests/unit/email/test_init.py +++ b/tests/unit/email/test_init.py @@ -5724,7 +5724,11 @@ def test_trusted_publisher_emails( pyramid_request.registry.settings = {"mail.sender": "[email protected]"} project_name = "test_project" - fakepublisher = pretend.stub(publisher_name="fakepublisher") + fakepublisher = pretend.stub( + publisher_name="fakepublisher", + repository_name="fakerepository", + environment="fakeenvironment", + ) # NOTE: Can't set __str__ using pretend.stub() monkeypatch.setattr( fakepublisher.__class__, "__str__", lambda s: "fakespecifier" @@ -5741,7 +5745,9 @@ def test_trusted_publisher_emails( "username": stub_user.username, "project_name": project_name, "publisher_name": "fakepublisher", - "publisher_spec": "fakespecifier", + "publisher_workflow": "fakespecifier", + "publisher_repository": "fakerepository", + "publisher_environment": "fakeenvironment", } subject_renderer.assert_() body_renderer.assert_(username=stub_user.username, project_name=project_name)
Trusted publishing: "OIDC added" email does not specify full trust details

### Story

I asked my co-maintainer to configure trusted publishing on a project where I don't have owner privileges. I gave them detailed instructions. When they added trusted publishing, I got this email: https://github.com/pypi/warehouse/blob/0795e55d/warehouse/templates/email/trusted-publisher-added/body.html#L31

Specifically, this is what I saw:

> Publisher name: GitHub
> Publisher specification: ci-cd.yml

As you can see, the specification field only contains the workflow name. It does not say which *repository* is trusted, nor does it specify the *environment* name (which was the main piece of information I was looking out for).

### Expectations

I'd like to be able to see all the details of the trust configuration, including the optional fields.

### Motivation

This is useful for logging, correctness verification, and matching the configuration against the CI implementation. Especially since this information is not available in the web interface to accounts with maintainer-level access.
Thanks for the issue, I agree this could use more detail. The templates are here if anyone would like to work on this: https://github.com/pypi/warehouse/tree/main/warehouse/templates/email/trusted-publisher-added

I'll take it.

@Martolivna FYI, feel free to tag me and/or @tnytown for review on any changes here! We're partially funded to work on tasks like these.

Hello @woodruffw, thank you for your assistance. I have encountered some difficulties while resolving this issue; once I have any updates or a potential solution, I will let you know. I apologize if it takes too much time.

No problem at all! We're also happy to help with questions, if you have any.
2023-05-15T11:52:10Z
[]
[]
pypi/warehouse
13,680
pypi__warehouse-13680
[ "13515" ]
d79691ee14dfdb52d6669ce34c79d5080349bcae
diff --git a/warehouse/accounts/__init__.py b/warehouse/accounts/__init__.py --- a/warehouse/accounts/__init__.py +++ b/warehouse/accounts/__init__.py @@ -37,7 +37,7 @@ MacaroonAuthorizationPolicy, MacaroonSecurityPolicy, ) -from warehouse.oidc.models import OIDCPublisher +from warehouse.oidc.utils import OIDCContext from warehouse.organizations.services import IOrganizationService from warehouse.rate_limiting import IRateLimiter, RateLimit from warehouse.utils.security_policy import MultiSecurityPolicy @@ -64,7 +64,17 @@ def _user(request): def _oidc_publisher(request): - return request.identity if isinstance(request.identity, OIDCPublisher) else None + return ( + request.identity.publisher + if isinstance(request.identity, OIDCContext) + else None + ) + + +def _oidc_claims(request): + return ( + request.identity.claims if isinstance(request.identity, OIDCContext) else None + ) def _organization_access(request): @@ -136,6 +146,7 @@ def includeme(config): # request identity by type, if they know it. config.add_request_method(_user, name="user", reify=True) config.add_request_method(_oidc_publisher, name="oidc_publisher", reify=True) + config.add_request_method(_oidc_claims, name="oidc_claims", reify=True) config.add_request_method( _organization_access, name="organization_access", reify=True ) diff --git a/warehouse/accounts/views.py b/warehouse/accounts/views.py --- a/warehouse/accounts/views.py +++ b/warehouse/accounts/views.py @@ -1561,7 +1561,7 @@ def add_pending_github_oidc_publisher(self): "publisher": pending_publisher.publisher_name, "id": str(pending_publisher.id), "specifier": str(pending_publisher), - "url": pending_publisher.publisher_url, + "url": pending_publisher.publisher_url(), "submitted_by": self.request.user.username, }, ) @@ -1651,7 +1651,7 @@ def delete_pending_oidc_publisher(self): "publisher": pending_publisher.publisher_name, "id": str(pending_publisher.id), "specifier": str(pending_publisher), - "url": pending_publisher.publisher_url, + "url": pending_publisher.publisher_url(), "submitted_by": self.request.user.username, }, ) diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -1184,7 +1184,9 @@ def file_upload(request): if request.user else "OpenID created token", "canonical_version": release.canonical_version, - "publisher_url": request.oidc_publisher.publisher_url + "publisher_url": request.oidc_publisher.publisher_url( + request.oidc_claims + ) if request.oidc_publisher else None, }, @@ -1426,7 +1428,9 @@ def file_upload(request): if request.user else "OpenID created token", "canonical_version": release.canonical_version, - "publisher_url": request.oidc_publisher.publisher_url + "publisher_url": request.oidc_publisher.publisher_url( + request.oidc_claims + ) if request.oidc_publisher else None, "project_id": str(project.id), diff --git a/warehouse/macaroons/caveats/__init__.py b/warehouse/macaroons/caveats/__init__.py --- a/warehouse/macaroons/caveats/__init__.py +++ b/warehouse/macaroons/caveats/__init__.py @@ -33,7 +33,6 @@ deserialize, serialize, ) -from warehouse.oidc import models as oidc_models from warehouse.oidc.interfaces import SignedClaims from warehouse.packaging.models import Project @@ -117,16 +116,21 @@ def verify(self, request: Request, context: Any, permission: str) -> Result: class OIDCPublisher(Caveat): oidc_publisher_id: StrictStr oidc_claims: SignedClaims | None = None + """ + This field is deprecated and should not be used. 
+ + Contains the OIDC claims passed through from token exchange. + """ def verify(self, request: Request, context: Any, permission: str) -> Result: # If the identity associated with this macaroon is not an OpenID publisher, # then it doesn't make sense to restrict it with an `OIDCPublisher` caveat. - if not isinstance(request.identity, oidc_models.OIDCPublisher): + if not request.oidc_publisher: return Failure( "OIDC scoped token used outside of an OIDC identified request" ) - if str(request.identity.id) != self.oidc_publisher_id: + if str(request.oidc_publisher.id) != self.oidc_publisher_id: return Failure( "current OIDC publisher does not match publisher restriction in token" ) @@ -137,7 +141,7 @@ def verify(self, request: Request, context: Any, permission: str) -> Result: # Specifically, they are only valid against projects that are registered # to the current identifying OpenID publisher. - if context not in request.identity.projects: + if context not in request.oidc_publisher.projects: return Failure( f"OIDC scoped token is not valid for project '{context.name}'" ) diff --git a/warehouse/macaroons/interfaces.py b/warehouse/macaroons/interfaces.py --- a/warehouse/macaroons/interfaces.py +++ b/warehouse/macaroons/interfaces.py @@ -41,7 +41,13 @@ def verify(raw_macaroon, request, context, permission): """ def create_macaroon( - location, description, scopes, *, user_id=None, oidc_publisher_id=None + location, + description, + scopes, + *, + user_id=None, + oidc_publisher_id=None, + additional=None ): """ Returns a tuple of a new raw (serialized) macaroon and its DB model. diff --git a/warehouse/macaroons/models.py b/warehouse/macaroons/models.py --- a/warehouse/macaroons/models.py +++ b/warehouse/macaroons/models.py @@ -69,6 +69,10 @@ class Macaroon(db.Model): # body of the permissions ("V1") caveat. permissions_caveat = Column(JSONB, nullable=False, server_default=sql.text("'{}'")) + # Additional state associated with this macaroon. + # For OIDC publisher-issued macaroons, this will contain a subset of OIDC claims. + additional = Column(JSONB, nullable=True) + # It might be better to move this default into the database, that way we # make it less likely that something does it incorrectly (since the # default would be to generate a random key). However, it appears the diff --git a/warehouse/macaroons/security_policy.py b/warehouse/macaroons/security_policy.py --- a/warehouse/macaroons/security_policy.py +++ b/warehouse/macaroons/security_policy.py @@ -20,6 +20,7 @@ from warehouse.errors import WarehouseDenied from warehouse.macaroons import InvalidMacaroonError from warehouse.macaroons.interfaces import IMacaroonService +from warehouse.oidc.utils import OIDCContext from warehouse.utils.security_policy import AuthenticationMethod @@ -90,13 +91,19 @@ def identity(self, request): try: dm = macaroon_service.find_from_raw(macaroon) + oidc_claims = ( + dm.additional.get("oidc") + if dm.oidc_publisher and dm.additional + else None + ) except InvalidMacaroonError: return None # Every Macaroon is either associated with a user or an OIDC publisher. 
if dm.user is not None: return dm.user - return dm.oidc_publisher + + return OIDCContext(dm.oidc_publisher, oidc_claims) def remember(self, request, userid, **kw): # This is a NO-OP because our Macaroon header policy doesn't allow diff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py --- a/warehouse/macaroons/services.py +++ b/warehouse/macaroons/services.py @@ -142,7 +142,14 @@ def verify(self, raw_macaroon, request, context, permission): raise InvalidMacaroonError(verified.msg) def create_macaroon( - self, location, description, scopes, *, user_id=None, oidc_publisher_id=None + self, + location, + description, + scopes, + *, + user_id=None, + oidc_publisher_id=None, + additional=None, ): """ Returns a tuple of a new raw (serialized) macaroon and its DB model. @@ -171,6 +178,7 @@ def create_macaroon( oidc_publisher_id=oidc_publisher_id, description=description, permissions_caveat={"permissions": permissions}, + additional=additional, ) self.db.add(dm) self.db.flush() # flush db now so dm.id is available diff --git a/warehouse/manage/views/__init__.py b/warehouse/manage/views/__init__.py --- a/warehouse/manage/views/__init__.py +++ b/warehouse/manage/views/__init__.py @@ -1332,13 +1332,13 @@ def add_github_oidc_publisher(self): "publisher": publisher.publisher_name, "id": str(publisher.id), "specifier": str(publisher), - "url": publisher.publisher_url, + "url": publisher.publisher_url(), "submitted_by": self.request.user.username, }, ) self.request.session.flash( - f"Added {publisher} in {publisher.publisher_url} to {self.project.name}", + f"Added {publisher} in {publisher.publisher_url()} to {self.project.name}", queue="success", ) @@ -1397,7 +1397,7 @@ def delete_oidc_publisher(self): "publisher": publisher.publisher_name, "id": str(publisher.id), "specifier": str(publisher), - "url": publisher.publisher_url, + "url": publisher.publisher_url(), "submitted_by": self.request.user.username, }, ) diff --git a/warehouse/migrations/versions/646bc86a09b6_macaroon_oidc_claims.py b/warehouse/migrations/versions/646bc86a09b6_macaroon_oidc_claims.py new file mode 100644 --- /dev/null +++ b/warehouse/migrations/versions/646bc86a09b6_macaroon_oidc_claims.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +macaroon oidc claims + +Revision ID: 646bc86a09b6 +Revises: 60e6b0dd0f47 +Create Date: 2023-06-01 16:50:32.765849 +""" + +import sqlalchemy as sa + +from alembic import op +from sqlalchemy.dialects import postgresql + +revision = "646bc86a09b6" +down_revision = "60e6b0dd0f47" + + +def upgrade(): + op.add_column( + "macaroons", + sa.Column("additional", postgresql.JSONB(astext_type=sa.Text()), nullable=True), + ) + + +def downgrade(): + op.drop_column("macaroons", "additional") diff --git a/warehouse/oidc/models/_core.py b/warehouse/oidc/models/_core.py --- a/warehouse/oidc/models/_core.py +++ b/warehouse/oidc/models/_core.py @@ -178,8 +178,7 @@ def publisher_name(self): # pragma: no cover # Only concrete subclasses are constructed. 
raise NotImplementedError - @property - def publisher_url(self): # pragma: no cover + def publisher_url(self, claims=None): # pragma: no cover # Only concrete subclasses are constructed. raise NotImplementedError diff --git a/warehouse/oidc/models/github.py b/warehouse/oidc/models/github.py --- a/warehouse/oidc/models/github.py +++ b/warehouse/oidc/models/github.py @@ -137,10 +137,6 @@ def publisher_name(self): def repository(self): return f"{self.repository_owner}/{self.repository_name}" - @property - def publisher_url(self): - return f"https://github.com/{self.repository}" - @property def job_workflow_ref(self): return f"{self.repository}/{self._workflow_slug}" @@ -149,6 +145,14 @@ def job_workflow_ref(self): def sub(self): return f"repo:{self.repository}" + def publisher_url(self, claims=None): + base = f"https://github.com/{self.repository}" + sha = claims.get("sha") if claims else None + + if sha: + return f"{base}/commit/{sha}" + return base + def __str__(self): return self.workflow_filename diff --git a/warehouse/oidc/utils.py b/warehouse/oidc/utils.py --- a/warehouse/oidc/utils.py +++ b/warehouse/oidc/utils.py @@ -12,9 +12,12 @@ from __future__ import annotations +from dataclasses import dataclass + from sqlalchemy.sql.expression import func, literal -from warehouse.oidc.models import GitHubPublisher, PendingGitHubPublisher +from warehouse.oidc.interfaces import SignedClaims +from warehouse.oidc.models import GitHubPublisher, OIDCPublisher, PendingGitHubPublisher GITHUB_OIDC_ISSUER_URL = "https://token.actions.githubusercontent.com" @@ -91,3 +94,26 @@ def find_publisher_by_issuer(session, issuer_url, signed_claims, *, pending=Fals else: # Unreachable; same logic error as above. return None # pragma: no cover + + +@dataclass +class OIDCContext: + """ + This class supports `MacaroonSecurityPolicy` in + `warehouse.macaroons.security_policy`. + + It is a wrapper containing both the signed claims associated with an OIDC + authenticated request and its `OIDCPublisher` DB model. We use it to smuggle + claims from the identity provider through to a session. `request.identity` + in an OIDC authenticated request should return this type. + """ + + publisher: OIDCPublisher + """ + The associated OIDC publisher. + """ + + claims: SignedClaims | None + """ + Pertinent OIDC claims from the token, if they exist. 
+ """ diff --git a/warehouse/oidc/views.py b/warehouse/oidc/views.py --- a/warehouse/oidc/views.py +++ b/warehouse/oidc/views.py @@ -194,12 +194,12 @@ def _invalid(errors): [ caveats.OIDCPublisher( oidc_publisher_id=str(publisher.id), - oidc_claims={"ref": claims.get("ref"), "sha": claims.get("sha")}, ), caveats.ProjectID(project_ids=[str(p.id) for p in publisher.projects]), caveats.Expiration(expires_at=expires_at, not_before=not_before), ], oidc_publisher_id=publisher.id, + additional={"oidc": {"ref": claims.get("ref"), "sha": claims.get("sha")}}, ) for project in publisher.projects: project.record_event( @@ -209,7 +209,7 @@ def _invalid(errors): additional={ "expires": expires_at, "publisher_name": publisher.publisher_name, - "publisher_url": publisher.publisher_url, + "publisher_url": publisher.publisher_url(), }, ) return {"success": True, "token": serialized} diff --git a/warehouse/utils/security_policy.py b/warehouse/utils/security_policy.py --- a/warehouse/utils/security_policy.py +++ b/warehouse/utils/security_policy.py @@ -18,7 +18,7 @@ from zope.interface import implementer from warehouse.accounts.models import User -from warehouse.oidc.models import OIDCPublisher +from warehouse.oidc.utils import OIDCContext class AuthenticationMethod(enum.Enum): @@ -104,8 +104,8 @@ def permits(self, request, context, permission): if isinstance(identity, User): principals.append(f"user:{identity.id}") principals.extend(_principals_for_authenticated_user(identity)) - elif isinstance(identity, OIDCPublisher): - principals.append(f"oidc:{identity.id}") + elif isinstance(identity, OIDCContext): + principals.append(f"oidc:{identity.publisher.id}") else: return Denied("unknown identity")
diff --git a/tests/unit/accounts/test_core.py b/tests/unit/accounts/test_core.py --- a/tests/unit/accounts/test_core.py +++ b/tests/unit/accounts/test_core.py @@ -36,7 +36,9 @@ ) from warehouse.errors import BasicAuthBreachedPassword, BasicAuthFailedPassword from warehouse.events.tags import EventTag +from warehouse.oidc.interfaces import SignedClaims from warehouse.oidc.models import OIDCPublisher +from warehouse.oidc.utils import OIDCContext from warehouse.rate_limiting import IRateLimiter, RateLimit from ...common.db.accounts import UserFactory @@ -329,23 +331,28 @@ def test_without_identity(self): assert accounts._user(request) is None -class TestOIDCPublisher: +class TestOIDCPublisherAndClaims: def test_with_oidc_publisher(self, db_request): publisher = GitHubPublisherFactory.create() assert isinstance(publisher, OIDCPublisher) - request = pretend.stub(identity=publisher) + claims = SignedClaims({"foo": "bar"}) + + request = pretend.stub(identity=OIDCContext(publisher, claims)) assert accounts._oidc_publisher(request) is publisher + assert accounts._oidc_claims(request) is claims def test_without_oidc_publisher_identity(self): nonpublisher = pretend.stub() request = pretend.stub(identity=nonpublisher) assert accounts._oidc_publisher(request) is None + assert accounts._oidc_claims(request) is None def test_without_identity(self): request = pretend.stub(identity=None) assert accounts._oidc_publisher(request) is None + assert accounts._oidc_claims(request) is None class TestOrganizationAccess: @@ -458,6 +465,7 @@ def test_includeme(monkeypatch): assert config.add_request_method.calls == [ pretend.call(accounts._user, name="user", reify=True), pretend.call(accounts._oidc_publisher, name="oidc_publisher", reify=True), + pretend.call(accounts._oidc_claims, name="oidc_claims", reify=True), pretend.call( accounts._organization_access, name="organization_access", reify=True ), diff --git a/tests/unit/accounts/test_views.py b/tests/unit/accounts/test_views.py --- a/tests/unit/accounts/test_views.py +++ b/tests/unit/accounts/test_views.py @@ -3650,7 +3650,7 @@ def test_add_pending_github_oidc_publisher(self, monkeypatch, db_request): "publisher": pending_publisher.publisher_name, "id": str(pending_publisher.id), "specifier": str(pending_publisher), - "url": pending_publisher.publisher_url, + "url": pending_publisher.publisher_url(), "submitted_by": db_request.user.username, }, ) @@ -3884,7 +3884,7 @@ def test_delete_pending_oidc_publisher(self, monkeypatch, db_request): "publisher": "GitHub", "id": str(pending_publisher.id), "specifier": str(pending_publisher), - "url": pending_publisher.publisher_url, + "url": pending_publisher.publisher_url(), "submitted_by": db_request.user.username, }, ) diff --git a/tests/unit/forklift/test_legacy.py b/tests/unit/forklift/test_legacy.py --- a/tests/unit/forklift/test_legacy.py +++ b/tests/unit/forklift/test_legacy.py @@ -38,6 +38,8 @@ from warehouse.errors import BasicAuthTwoFactorEnabled from warehouse.forklift import legacy from warehouse.metrics import IMetricsService +from warehouse.oidc.interfaces import SignedClaims +from warehouse.oidc.utils import OIDCContext from warehouse.packaging.interfaces import IFileStorage, IProjectService from warehouse.packaging.models import ( Dependency, @@ -3061,21 +3063,45 @@ def test_upload_updates_existing_project_name( ("v1.0", "1.0"), ], ) + @pytest.mark.parametrize( + "test_with_user", + [ + True, + False, + ], + ) def test_upload_succeeds_creates_release( - self, pyramid_config, db_request, metrics, version, 
expected_version + self, + monkeypatch, + pyramid_config, + db_request, + metrics, + version, + expected_version, + test_with_user, ): - user = UserFactory.create() - EmailFactory.create(user=user) + from warehouse.events.models import HasEvents + from warehouse.events.tags import EventTag + project = ProjectFactory.create() - RoleFactory.create(user=user, project=project) + if test_with_user: + identity = UserFactory.create() + EmailFactory.create(user=identity) + RoleFactory.create(user=identity, project=project) + else: + publisher = GitHubPublisherFactory.create(projects=[project]) + claims = {"sha": "somesha"} + identity = OIDCContext(publisher, SignedClaims(claims)) + db_request.oidc_publisher = identity.publisher + db_request.oidc_claims = identity.claims db_request.db.add(Classifier(classifier="Environment :: Other Environment")) db_request.db.add(Classifier(classifier="Programming Language :: Python")) filename = "{}-{}.tar.gz".format(project.name, "1.0") - pyramid_config.testing_securitypolicy(identity=user) - db_request.user = user + pyramid_config.testing_securitypolicy(identity=identity) + db_request.user = identity if test_with_user else None db_request.user_agent = "warehouse-tests/6.6.6" db_request.POST = MultiDict( { @@ -3110,6 +3136,11 @@ def test_upload_succeeds_creates_release( IMetricsService: metrics, }.get(svc) + record_event = pretend.call_recorder( + lambda self, *, tag, ip_address, request=None, additional: None + ) + monkeypatch.setattr(HasEvents, "record_event", record_event) + resp = legacy.file_upload(db_request) assert resp.status_code == 200 @@ -3155,13 +3186,53 @@ def test_upload_succeeds_creates_release( release.project.name, release.version, "new release", - user, + identity if test_with_user else None, ), ( release.project.name, release.version, f"add source file {filename}", - user, + identity if test_with_user else None, + ), + ] + + # Ensure that all of our events have been created + release_event = { + "submitted_by": identity.username + if test_with_user + else "OpenID created token", + "canonical_version": release.canonical_version, + "publisher_url": f"{identity.publisher.publisher_url()}/commit/somesha" + if not test_with_user + else None, + } + + fileadd_event = { + "filename": filename, + "submitted_by": identity.username + if test_with_user + else "OpenID created token", + "canonical_version": release.canonical_version, + "publisher_url": f"{identity.publisher.publisher_url()}/commit/somesha" + if not test_with_user + else None, + "project_id": str(project.id), + } + + assert record_event.calls == [ + pretend.call( + mock.ANY, + tag=EventTag.Project.ReleaseAdd, + ip_address=mock.ANY, + request=db_request, + additional=release_event, + ), + pretend.call( + mock.ANY, + tag=EventTag.File.FileAdd, + ip_address=mock.ANY, + request=db_request, + additional=fileadd_event, ), ] diff --git a/tests/unit/macaroons/test_caveats.py b/tests/unit/macaroons/test_caveats.py --- a/tests/unit/macaroons/test_caveats.py +++ b/tests/unit/macaroons/test_caveats.py @@ -19,6 +19,7 @@ from pydantic.dataclasses import dataclass from pymacaroons import Macaroon +from warehouse.accounts import _oidc_publisher from warehouse.macaroons import caveats from warehouse.macaroons.caveats import ( Caveat, @@ -35,6 +36,7 @@ verify, ) from warehouse.macaroons.caveats._core import _CaveatRegistry +from warehouse.oidc.utils import OIDCContext from ...common.db.accounts import UserFactory from ...common.db.oidc import GitHubPublisherFactory @@ -91,13 +93,6 @@ class 
TestDeserialization: b'[4,"somepublisher"]', OIDCPublisher(oidc_publisher_id="somepublisher"), ), - ( - b'[4,"somepublisher",{"foo": "bar"}]', - OIDCPublisher( - oidc_publisher_id="somepublisher", - oidc_claims={"foo": "bar"}, - ), - ), # Legacy Caveat Style (b'{"exp": 50, "nbf": 10}', Expiration(expires_at=50, not_before=10)), ( @@ -290,7 +285,9 @@ class TestOIDCPublisherCaveat: def test_verify_no_identity(self): caveat = OIDCPublisher(oidc_publisher_id="invalid") result = caveat.verify( - pretend.stub(identity=None), pretend.stub(), pretend.stub() + pretend.stub(identity=None, oidc_publisher=None), + pretend.stub(), + pretend.stub(), ) assert result == Failure( @@ -298,24 +295,24 @@ def test_verify_no_identity(self): ) def test_verify_invalid_publisher_id(self, db_request): - publisher = GitHubPublisherFactory.create() + identity = OIDCContext(GitHubPublisherFactory.create(), None) + request = pretend.stub(identity=identity) + request.oidc_publisher = _oidc_publisher(request) caveat = OIDCPublisher(oidc_publisher_id="invalid") - result = caveat.verify( - pretend.stub(identity=publisher), pretend.stub(), pretend.stub() - ) + result = caveat.verify(request, pretend.stub(), pretend.stub()) assert result == Failure( "current OIDC publisher does not match publisher restriction in token" ) def test_verify_invalid_context(self, db_request): - publisher = GitHubPublisherFactory.create() + identity = OIDCContext(GitHubPublisherFactory.create(), None) + request = pretend.stub(identity=identity) + request.oidc_publisher = _oidc_publisher(request) - caveat = OIDCPublisher(oidc_publisher_id=str(publisher.id)) - result = caveat.verify( - pretend.stub(identity=publisher), pretend.stub(), pretend.stub() - ) + caveat = OIDCPublisher(oidc_publisher_id=str(request.oidc_publisher.id)) + result = caveat.verify(request, pretend.stub(), pretend.stub()) assert result == Failure("OIDC scoped token used outside of a project context") @@ -325,10 +322,12 @@ def test_verify_invalid_project(self, db_request): # This OIDC publisher is only registered to "foobar", so it should # not verify a caveat presented for "foobaz". - publisher = GitHubPublisherFactory.create(projects=[foobar]) - caveat = OIDCPublisher(oidc_publisher_id=str(publisher.id)) + identity = OIDCContext(GitHubPublisherFactory.create(projects=[foobar]), None) + request = pretend.stub(identity=identity) + request.oidc_publisher = _oidc_publisher(request) + caveat = OIDCPublisher(oidc_publisher_id=str(request.oidc_publisher.id)) - result = caveat.verify(pretend.stub(identity=publisher), foobaz, pretend.stub()) + result = caveat.verify(request, foobaz, pretend.stub()) assert result == Failure("OIDC scoped token is not valid for project 'foobaz'") @@ -337,10 +336,12 @@ def test_verify_ok(self, db_request): # This OIDC publisher is only registered to "foobar", so it should # not verify a caveat presented for "foobaz". 
- publisher = GitHubPublisherFactory.create(projects=[foobar]) - caveat = OIDCPublisher(oidc_publisher_id=str(publisher.id)) + identity = OIDCContext(GitHubPublisherFactory.create(projects=[foobar]), None) + request = pretend.stub(identity=identity) + request.oidc_publisher = _oidc_publisher(request) + caveat = OIDCPublisher(oidc_publisher_id=str(request.oidc_publisher.id)) - result = caveat.verify(pretend.stub(identity=publisher), foobar, pretend.stub()) + result = caveat.verify(request, foobar, pretend.stub()) assert result == Success() diff --git a/tests/unit/macaroons/test_security_policy.py b/tests/unit/macaroons/test_security_policy.py --- a/tests/unit/macaroons/test_security_policy.py +++ b/tests/unit/macaroons/test_security_policy.py @@ -21,6 +21,8 @@ from warehouse.macaroons import security_policy from warehouse.macaroons.interfaces import IMacaroonService from warehouse.macaroons.services import InvalidMacaroonError +from warehouse.oidc.interfaces import SignedClaims +from warehouse.oidc.utils import OIDCContext @pytest.mark.parametrize( @@ -143,7 +145,7 @@ def test_identity_user(self, monkeypatch): ) user = pretend.stub() - macaroon = pretend.stub(user=user) + macaroon = pretend.stub(user=user, oidc_publisher=None) macaroon_service = pretend.stub( find_from_raw=pretend.call_recorder(lambda rm: macaroon), ) @@ -177,7 +179,10 @@ def test_identity_oidc_publisher(self, monkeypatch): ) oidc_publisher = pretend.stub() - macaroon = pretend.stub(user=None, oidc_publisher=oidc_publisher) + oidc_additional = {"oidc": {"foo": "bar"}} + macaroon = pretend.stub( + user=None, oidc_publisher=oidc_publisher, additional=oidc_additional + ) macaroon_service = pretend.stub( find_from_raw=pretend.call_recorder(lambda rm: macaroon), ) @@ -187,7 +192,13 @@ def test_identity_oidc_publisher(self, monkeypatch): find_service=pretend.call_recorder(lambda iface, **kw: macaroon_service), ) - assert policy.identity(request) is oidc_publisher + identity = policy.identity(request) + assert identity + assert identity.publisher is oidc_publisher + assert identity == OIDCContext( + oidc_publisher, SignedClaims(oidc_additional["oidc"]) + ) + assert extract_http_macaroon.calls == [pretend.call(request)] assert request.find_service.calls == [ pretend.call(IMacaroonService, context=None), diff --git a/tests/unit/macaroons/test_services.py b/tests/unit/macaroons/test_services.py --- a/tests/unit/macaroons/test_services.py +++ b/tests/unit/macaroons/test_services.py @@ -92,6 +92,28 @@ def test_find_from_raw(self, user_service, macaroon_service): assert isinstance(dm, Macaroon) assert macaroon.id == dm.id assert macaroon.user == user + assert macaroon.additional is None + + def test_find_from_raw_oidc(self, macaroon_service): + publisher = GitHubPublisherFactory.create() + claims = {"sha": "somesha", "ref": "someref"} + ( + serialized, + macaroon, + ) = macaroon_service.create_macaroon( + "fake location", + "fake description", + [caveats.OIDCPublisher(oidc_publisher_id=str(publisher.id))], + oidc_publisher_id=publisher.id, + additional=claims, + ) + + dm = macaroon_service.find_from_raw(serialized) + + assert isinstance(dm, Macaroon) + assert macaroon.id == macaroon.id + assert macaroon.oidc_publisher == publisher + assert macaroon.additional == claims @pytest.mark.parametrize( "raw_macaroon", diff --git a/tests/unit/manage/test_views.py b/tests/unit/manage/test_views.py --- a/tests/unit/manage/test_views.py +++ b/tests/unit/manage/test_views.py @@ -6088,7 +6088,7 @@ def test_add_github_oidc_publisher_preexisting(self, 
monkeypatch): id="fakeid", publisher_name="GitHub", repository_name="fakerepo", - publisher_url="https://github.com/fakeowner/fakerepo", + publisher_url=lambda x=None: "https://github.com/fakeowner/fakerepo", owner="fakeowner", owner_id="1234", workflow_filename="fakeworkflow.yml", @@ -6554,7 +6554,7 @@ def test_delete_oidc_publisher_registered_to_multiple_projects( "publisher": publisher.publisher_name, "id": str(publisher.id), "specifier": str(publisher), - "url": publisher.publisher_url, + "url": publisher.publisher_url(), "submitted_by": db_request.user.username, }, ) @@ -6645,7 +6645,7 @@ def test_delete_oidc_publisher_entirely(self, monkeypatch, db_request): "publisher": publisher.publisher_name, "id": str(publisher.id), "specifier": str(publisher), - "url": publisher.publisher_url, + "url": publisher.publisher_url(), "submitted_by": db_request.user.username, } diff --git a/tests/unit/oidc/models/test_github.py b/tests/unit/oidc/models/test_github.py --- a/tests/unit/oidc/models/test_github.py +++ b/tests/unit/oidc/models/test_github.py @@ -76,7 +76,11 @@ def test_github_publisher_computed_properties(self): assert getattr(publisher, claim_name) is not None assert str(publisher) == "fakeworkflow.yml" - assert publisher.publisher_url == "https://github.com/fakeowner/fakerepo" + assert publisher.publisher_url() == "https://github.com/fakeowner/fakerepo" + assert ( + publisher.publisher_url({"sha": "somesha"}) + == "https://github.com/fakeowner/fakerepo/commit/somesha" + ) def test_github_publisher_unaccounted_claims(self, monkeypatch): publisher = github.GitHubPublisher( diff --git a/tests/unit/oidc/test_views.py b/tests/unit/oidc/test_views.py --- a/tests/unit/oidc/test_views.py +++ b/tests/unit/oidc/test_views.py @@ -380,7 +380,7 @@ def test_mint_token_from_pending_trusted_publisher_invalidates_others( @pytest.mark.parametrize( - ("claims_in_token", "claims_in_caveat"), + ("claims_in_token", "claims_input"), [ ({"ref": "someref", "sha": "somesha"}, {"ref": "someref", "sha": "somesha"}), ({"ref": "someref"}, {"ref": "someref", "sha": None}), @@ -388,7 +388,7 @@ def test_mint_token_from_pending_trusted_publisher_invalidates_others( ], ) def test_mint_token_from_oidc_no_pending_publisher_ok( - monkeypatch, claims_in_token, claims_in_caveat + monkeypatch, claims_in_token, claims_input ): time = pretend.stub(time=pretend.call_recorder(lambda: 0)) monkeypatch.setattr(views, "time", time) @@ -401,7 +401,7 @@ def test_mint_token_from_oidc_no_pending_publisher_ok( id="fakepublisherid", projects=[project], publisher_name="fakepublishername", - publisher_url="https://fake/url", + publisher_url=lambda x=None: "https://fake/url", ) # NOTE: Can't set __str__ using pretend.stub() monkeypatch.setattr(publisher.__class__, "__str__", lambda s: "fakespecifier") @@ -454,12 +454,13 @@ def find_service(iface, **kw): f"OpenID token: fakespecifier ({datetime.fromtimestamp(0).isoformat()})", [ caveats.OIDCPublisher( - oidc_publisher_id="fakepublisherid", oidc_claims=claims_in_caveat + oidc_publisher_id="fakepublisherid", ), caveats.ProjectID(project_ids=["fakeprojectid"]), caveats.Expiration(expires_at=900, not_before=0), ], oidc_publisher_id="fakepublisherid", + additional={"oidc": claims_input}, ) ] assert project.record_event.calls == [ diff --git a/tests/unit/utils/test_security_policy.py b/tests/unit/utils/test_security_policy.py --- a/tests/unit/utils/test_security_policy.py +++ b/tests/unit/utils/test_security_policy.py @@ -16,6 +16,7 @@ from pyramid.authorization import Authenticated from 
pyramid.security import Denied +from warehouse.oidc.utils import OIDCContext from warehouse.utils import security_policy from ...common.db.accounts import UserFactory @@ -219,7 +220,7 @@ def test_permits_oidc_publisher(self, db_request): policy = security_policy.MultiSecurityPolicy(subpolicies, authz) publisher = GitHubPublisherFactory.create() - request = pretend.stub(identity=publisher) + request = pretend.stub(identity=OIDCContext(publisher, None)) context = pretend.stub() permission = pretend.stub() assert policy.permits(request, context, permission) is status
Trusted publishing: preserve OIDC claims for events

As part of making trusted publisher events "richer" and more useful to users, we should figure out how to embed their associated OIDC claims. This is currently impossible, due to how we perform the token exchange and request identity setup:

1. During token exchange, we take the OIDC token and exchange it for a PyPI API token; this API token is a macaroon that can contain arbitrary data, but we don't currently embed any of the OIDC claims in it;
2. During authentication with an OIDC-minted API token, we use `OIDCProvider` as our `request.identity` type, which is "stateless" with respect to the *specific* API token that produced it;
3. Consequently, there's no `request` state that links the current `request.identity` to either the underlying macaroon *or* the OIDC token that produced it.

To fix this, we have to create links at (1) and (2): the minted API token should contain caveats (or other metadata?) for the OIDC token's claims, and `request.identity` should become a type that contains the claims extracted from the macaroon (a rough sketch of this linkage follows below).

CC @di
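The merged patch above realizes this with an `OIDCContext` dataclass and a new `additional` JSONB column on macaroons. Condensed into a minimal, self-contained sketch (where `identity_from_macaroon` is a hypothetical helper standing in for the security policy's `identity()` method, and `dm` is a deserialized macaroon row), the linkage looks roughly like:

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class OIDCContext:
    """Pairs a publisher model with the claims recovered from the macaroon."""

    publisher: Any  # the OIDCPublisher DB model
    claims: dict | None  # pertinent OIDC claims, if any were stored


def identity_from_macaroon(dm):
    """Resolve a deserialized macaroon row to a request identity.

    User-issued macaroons resolve to the user; publisher-issued ones
    resolve to an OIDCContext carrying the claims stored at mint time.
    """
    if dm.user is not None:
        return dm.user
    claims = (dm.additional or {}).get("oidc")
    return OIDCContext(dm.oidc_publisher, claims)
```

Keeping the claims on the macaroon row (rather than inside a caveat) lets the security policy rehydrate them on every authenticated request without bloating the serialized token itself.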
@tnytown will be looking at this as well.
2023-05-16T18:44:42Z
[]
[]
pypi/warehouse
13,696
pypi__warehouse-13696
[ "13695" ]
a5d6e1142cb4232e1e2530d2176dde9a48c4018f
diff --git a/warehouse/accounts/forms.py b/warehouse/accounts/forms.py --- a/warehouse/accounts/forms.py +++ b/warehouse/accounts/forms.py @@ -48,7 +48,7 @@ class UsernameMixin: - username = wtforms.StringField(validators=[wtforms.validators.DataRequired()]) + username = wtforms.StringField(validators=[wtforms.validators.InputRequired()]) def validate_username(self, field): userid = self.user_service.find_userid(field.data) @@ -62,7 +62,7 @@ def validate_username(self, field): class TOTPValueMixin: totp_value = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Regexp( rf"^ *([0-9] *){{{TOTP_LENGTH}}}$", message=_( @@ -75,13 +75,13 @@ class TOTPValueMixin: class WebAuthnCredentialMixin: - credential = wtforms.StringField(wtforms.validators.DataRequired()) + credential = wtforms.StringField(wtforms.validators.InputRequired()) class RecoveryCodeValueMixin: recovery_code_value = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Regexp( rf"^ *([0-9a-f] *){{{2*RECOVERY_CODE_BYTES}}}$", message=_( @@ -96,7 +96,7 @@ class RecoveryCodeValueMixin: class NewUsernameMixin: username = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Length( max=50, message=_("Choose a username with 50 characters or less.") ), @@ -131,7 +131,7 @@ def validate_username(self, field): class PasswordMixin: password = wtforms.PasswordField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Length( max=MAX_PASSWORD_SIZE, message=_("Password too long."), @@ -180,7 +180,7 @@ def validate_password(self, field): class NewPasswordMixin: new_password = wtforms.PasswordField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Length( max=MAX_PASSWORD_SIZE, message=_("Password too long."), @@ -193,7 +193,7 @@ class NewPasswordMixin: password_confirm = wtforms.PasswordField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Length( max=MAX_PASSWORD_SIZE, message=_("Password too long."), @@ -208,8 +208,8 @@ class NewPasswordMixin: # PasswordStrengthValidator of the new_password field, to ensure that the # newly set password doesn't contain any of them full_name = wtforms.StringField() # May be empty - username = wtforms.StringField(validators=[wtforms.validators.DataRequired()]) - email = wtforms.StringField(validators=[wtforms.validators.DataRequired()]) + username = wtforms.StringField(validators=[wtforms.validators.InputRequired()]) + email = wtforms.StringField(validators=[wtforms.validators.InputRequired()]) def __init__(self, *args, breach_service, **kwargs): super().__init__(*args, **kwargs) @@ -227,7 +227,7 @@ def validate_new_password(self, field): class NewEmailMixin: email = wtforms.fields.EmailField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Regexp( r".+@.+\..+", message=_("The email address isn't valid. 
Try again.") ), @@ -423,13 +423,13 @@ class ReAuthenticateForm(PasswordMixin, forms.Form): __params__ = ["username", "password", "next_route", "next_route_matchdict"] username = wtforms.fields.HiddenField( - validators=[wtforms.validators.DataRequired()] + validators=[wtforms.validators.InputRequired()] ) next_route = wtforms.fields.HiddenField( - validators=[wtforms.validators.DataRequired()] + validators=[wtforms.validators.InputRequired()] ) next_route_matchdict = wtforms.fields.HiddenField( - validators=[wtforms.validators.DataRequired()] + validators=[wtforms.validators.InputRequired()] ) def __init__(self, *args, user_service, **kwargs): @@ -472,7 +472,7 @@ def validate_recovery_code_value(self, field): class RequestPasswordResetForm(forms.Form): username_or_email = wtforms.StringField( - validators=[wtforms.validators.DataRequired()] + validators=[wtforms.validators.InputRequired()] ) def __init__(self, *args, user_service, **kwargs): diff --git a/warehouse/accounts/views.py b/warehouse/accounts/views.py --- a/warehouse/accounts/views.py +++ b/warehouse/accounts/views.py @@ -29,6 +29,7 @@ from pyramid.view import view_config, view_defaults from sqlalchemy.exc import NoResultFound from webauthn.helpers import bytes_to_base64url +from webob.multidict import MultiDict from warehouse.accounts import REDIRECT_FIELD_NAME from warehouse.accounts.forms import ( @@ -559,10 +560,12 @@ def register(request, _form_class=RegistrationForm): # the form contains an auto-generated field from recaptcha with # hyphens in it. make it play nice with wtforms. - post_body = {key.replace("-", "_"): value for key, value in request.POST.items()} + post_body = MultiDict( + {key.replace("-", "_"): value for key, value in request.POST.items()} + ) form = _form_class( - data=post_body, + formdata=post_body, user_service=user_service, recaptcha_service=recaptcha_service, breach_service=breach_service, diff --git a/warehouse/admin/views/banners.py b/warehouse/admin/views/banners.py --- a/warehouse/admin/views/banners.py +++ b/warehouse/admin/views/banners.py @@ -148,18 +148,18 @@ class BannerForm(Form): name = wtforms.fields.StringField( validators=[ wtforms.validators.Length(max=100), - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), ], ) text = wtforms.fields.StringField( validators=[ wtforms.validators.Length(max=280), - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), ], ) link_url = wtforms.fields.StringField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), URIValidator(), ] ) @@ -179,4 +179,4 @@ class BannerForm(Form): active = wtforms.fields.BooleanField( validators=[wtforms.validators.Optional()], default=False ) - end = wtforms.fields.DateField(validators=[wtforms.validators.DataRequired()]) + end = wtforms.fields.DateField(validators=[wtforms.validators.InputRequired()]) diff --git a/warehouse/admin/views/sponsors.py b/warehouse/admin/views/sponsors.py --- a/warehouse/admin/views/sponsors.py +++ b/warehouse/admin/views/sponsors.py @@ -30,7 +30,7 @@ class SponsorForm(Form): name = wtforms.fields.StringField( validators=[ wtforms.validators.Length(max=100), - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), ], ) service = wtforms.fields.StringField( @@ -39,7 +39,7 @@ class SponsorForm(Form): link_url = wtforms.fields.StringField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), URIValidator(), ] ) diff --git a/warehouse/admin/views/users.py 
b/warehouse/admin/views/users.py --- a/warehouse/admin/views/users.py +++ b/warehouse/admin/views/users.py @@ -74,7 +74,7 @@ def user_list(request): class EmailForm(forms.Form): - email = wtforms.fields.EmailField(validators=[wtforms.validators.DataRequired()]) + email = wtforms.fields.EmailField(validators=[wtforms.validators.InputRequired()]) primary = wtforms.fields.BooleanField() verified = wtforms.fields.BooleanField() public = wtforms.fields.BooleanField() diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -421,7 +421,7 @@ class MetadataForm(forms.Form): metadata_version = wtforms.StringField( description="Metadata-Version", validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.AnyOf( # Note: This isn't really Metadata 2.0, however bdist_wheel # claims it is producing a Metadata 2.0 metadata when in @@ -436,7 +436,7 @@ class MetadataForm(forms.Form): name = wtforms.StringField( description="Name", validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Regexp( PROJECT_NAME_RE, re.IGNORECASE, @@ -450,7 +450,7 @@ class MetadataForm(forms.Form): version = wtforms.StringField( description="Version", validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.Regexp( r"^(?!\s).*(?<!\s)$", message="Can't have leading or trailing whitespace.", @@ -526,7 +526,7 @@ class MetadataForm(forms.Form): pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()]) filetype = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(), + wtforms.validators.InputRequired(), wtforms.validators.AnyOf( ["bdist_egg", "bdist_wheel", "sdist"], message="Use a known file type." ), diff --git a/warehouse/forms.py b/warehouse/forms.py --- a/warehouse/forms.py +++ b/warehouse/forms.py @@ -11,7 +11,7 @@ # limitations under the License. 
from wtforms import Form as BaseForm, StringField -from wtforms.validators import DataRequired, StopValidation, ValidationError +from wtforms.validators import InputRequired, StopValidation, ValidationError from zxcvbn import zxcvbn from warehouse.i18n import KNOWN_LOCALES @@ -124,7 +124,7 @@ def __init__(self, *args, db, **kwargs): class SetLocaleForm(Form): __params__ = ["locale_id"] - locale_id = StringField(validators=[DataRequired(message="Missing locale ID")]) + locale_id = StringField(validators=[InputRequired(message="Missing locale ID")]) def validate_locale_id(self, field): if field.data not in KNOWN_LOCALES.keys(): diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py --- a/warehouse/manage/forms.py +++ b/warehouse/manage/forms.py @@ -40,7 +40,7 @@ class RoleNameMixin: role_name = wtforms.SelectField( "Select role", choices=[("", "Select role"), ("Maintainer", "Maintainer"), ("Owner", "Owner")], - validators=[wtforms.validators.DataRequired(message="Select role")], + validators=[wtforms.validators.InputRequired(message="Select role")], ) @@ -49,13 +49,13 @@ class TeamProjectRoleNameMixin: "Select permissions", choices=[("", "Select role"), ("Maintainer", "Maintainer"), ("Owner", "Owner")], coerce=lambda string: TeamProjectRoleType(string) if string else None, - validators=[wtforms.validators.DataRequired(message="Select role")], + validators=[wtforms.validators.InputRequired(message="Select role")], ) class UsernameMixin: username = wtforms.StringField( - validators=[wtforms.validators.DataRequired(message="Specify username")] + validators=[wtforms.validators.InputRequired(message="Specify username")] ) def validate_username(self, field): @@ -212,7 +212,7 @@ class DeleteWebAuthnForm(forms.Form): label = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message="Specify a device name"), + wtforms.validators.InputRequired(message="Specify a device name"), wtforms.validators.Length( max=64, message=("Label must be 64 characters or less") ), @@ -238,7 +238,7 @@ class ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form): label = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message="Specify a label"), + wtforms.validators.InputRequired(message="Specify a label"), wtforms.validators.Length( max=64, message=("Label must be 64 characters or less") ), @@ -293,7 +293,7 @@ def __init__(self, *args, user_id, macaroon_service, project_names, **kwargs): description = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message="Specify a token name"), + wtforms.validators.InputRequired(message="Specify a token name"), wtforms.validators.Length( max=100, message="Description must be 100 characters or less" ), @@ -301,7 +301,7 @@ def __init__(self, *args, user_id, macaroon_service, project_names, **kwargs): ) token_scope = wtforms.StringField( - validators=[wtforms.validators.DataRequired(message="Specify the token scope")] + validators=[wtforms.validators.InputRequired(message="Specify the token scope")] ) def validate_description(self, field): @@ -347,7 +347,7 @@ class DeleteMacaroonForm(UsernameMixin, PasswordMixin, forms.Form): __params__ = ["confirm_password", "macaroon_id"] macaroon_id = wtforms.StringField( - validators=[wtforms.validators.DataRequired(message="Identifier required")] + validators=[wtforms.validators.InputRequired(message="Identifier required")] ) def __init__(self, *args, macaroon_service, user_service, **kwargs): @@ -381,14 +381,14 @@ class OrganizationRoleNameMixin: ("Billing Manager", "Billing 
Manager"), ], coerce=lambda string: OrganizationRoleType(string) if string else None, - validators=[wtforms.validators.DataRequired(message="Select role")], + validators=[wtforms.validators.InputRequired(message="Select role")], ) class OrganizationNameMixin: name = wtforms.StringField( validators=[ - wtforms.validators.DataRequired( + wtforms.validators.InputRequired( message="Specify organization account name" ), wtforms.validators.Length( @@ -487,7 +487,7 @@ class TransferOrganizationProjectForm(forms.Form): "Select organization", choices=[("", "Select organization")], validators=[ - wtforms.validators.DataRequired(message="Select organization"), + wtforms.validators.InputRequired(message="Select organization"), ], ) @@ -538,7 +538,7 @@ class SaveOrganizationForm(forms.Form): display_name = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message="Specify your organization name"), + wtforms.validators.InputRequired(message="Specify your organization name"), wtforms.validators.Length( max=100, message=_( @@ -550,7 +550,7 @@ class SaveOrganizationForm(forms.Form): ) link_url = wtforms.URLField( validators=[ - wtforms.validators.DataRequired(message="Specify your organization URL"), + wtforms.validators.InputRequired(message="Specify your organization URL"), wtforms.validators.Length( max=400, message=_( @@ -566,7 +566,7 @@ class SaveOrganizationForm(forms.Form): ) description = wtforms.TextAreaField( validators=[ - wtforms.validators.DataRequired( + wtforms.validators.InputRequired( message="Specify your organization description" ), wtforms.validators.Length( @@ -582,7 +582,7 @@ class SaveOrganizationForm(forms.Form): choices=[("Company", "Company"), ("Community", "Community")], coerce=OrganizationType, validators=[ - wtforms.validators.DataRequired(message="Select organization type"), + wtforms.validators.InputRequired(message="Select organization type"), ], ) @@ -609,7 +609,7 @@ class SaveTeamForm(forms.Form): name = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message="Specify team name"), + wtforms.validators.InputRequired(message="Specify team name"), wtforms.validators.Length( max=50, message=_("Choose a team name with 50 characters or less."), diff --git a/warehouse/oidc/forms.py b/warehouse/oidc/forms.py --- a/warehouse/oidc/forms.py +++ b/warehouse/oidc/forms.py @@ -29,7 +29,7 @@ class GitHubPublisherBase(forms.Form): owner = wtforms.StringField( validators=[ - wtforms.validators.DataRequired( + wtforms.validators.InputRequired( message=_("Specify GitHub repository owner (username or organization)"), ), ] @@ -37,7 +37,7 @@ class GitHubPublisherBase(forms.Form): repository = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message=_("Specify repository name")), + wtforms.validators.InputRequired(message=_("Specify repository name")), wtforms.validators.Regexp( _VALID_GITHUB_REPO, message=_("Invalid repository name") ), @@ -46,7 +46,7 @@ class GitHubPublisherBase(forms.Form): workflow_filename = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message=_("Specify workflow filename")) + wtforms.validators.InputRequired(message=_("Specify workflow filename")) ] ) @@ -170,7 +170,7 @@ class PendingGitHubPublisherForm(GitHubPublisherBase): project_name = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message=_("Specify project name")), + wtforms.validators.InputRequired(message=_("Specify project name")), wtforms.validators.Regexp( PROJECT_NAME_RE, message=_("Invalid project name") ), @@ 
-199,7 +199,7 @@ class DeletePublisherForm(forms.Form): publisher_id = wtforms.StringField( validators=[ - wtforms.validators.DataRequired(message=_("Specify a publisher ID")), + wtforms.validators.InputRequired(message=_("Specify a publisher ID")), wtforms.validators.UUID(message=_("Publisher must be specified by ID")), ] ) diff --git a/warehouse/views.py b/warehouse/views.py --- a/warehouse/views.py +++ b/warehouse/views.py @@ -40,6 +40,7 @@ from sqlalchemy import func from sqlalchemy.sql import exists, expression from trove_classifiers import deprecated_classifiers, sorted_classifiers +from webob.multidict import MultiDict from warehouse.accounts import REDIRECT_FIELD_NAME from warehouse.accounts.models import User @@ -246,7 +247,7 @@ def index(request): ) def locale(request): try: - form = SetLocaleForm(locale_id=request.GET.getone("locale_id")) + form = SetLocaleForm(MultiDict({"locale_id": request.GET.getone("locale_id")})) except KeyError: raise HTTPBadRequest("Invalid amount of locale_id parameters provided")
diff --git a/tests/unit/accounts/test_forms.py b/tests/unit/accounts/test_forms.py --- a/tests/unit/accounts/test_forms.py +++ b/tests/unit/accounts/test_forms.py @@ -17,6 +17,8 @@ import pytest import wtforms +from webob.multidict import MultiDict + from warehouse import recaptcha from warehouse.accounts import forms from warehouse.accounts.interfaces import ( @@ -36,7 +38,9 @@ def test_creation(self): user_service = pretend.stub() breach_service = pretend.stub() form = forms.LoginForm( - request=request, user_service=user_service, breach_service=breach_service + request=request, + user_service=user_service, + breach_service=breach_service, ) assert form.request is request @@ -84,7 +88,7 @@ def test_validate_password_no_user(self): ) breach_service = pretend.stub() form = forms.LoginForm( - data={"username": "my_username"}, + formdata=MultiDict({"username": "my_username"}), request=request, user_service=user_service, breach_service=breach_service, @@ -110,7 +114,7 @@ def test_validate_password_disabled_for_compromised_pw(self, db_session): ) breach_service = pretend.stub(failure_message="Bad Password!") form = forms.LoginForm( - data={"username": "my_username"}, + formdata=MultiDict({"username": "my_username"}), request=request, user_service=user_service, breach_service=breach_service, @@ -141,7 +145,7 @@ def test_validate_password_ok(self): check_password=pretend.call_recorder(lambda pw, tags: False) ) form = forms.LoginForm( - data={"username": "my_username"}, + formdata=MultiDict({"username": "my_username"}), request=request, user_service=user_service, breach_service=breach_service, @@ -183,7 +187,7 @@ def test_validate_password_notok(self, db_session): ) breach_service = pretend.stub() form = forms.LoginForm( - data={"username": "my_username"}, + formdata=MultiDict({"username": "my_username"}), request=request, user_service=user_service, breach_service=breach_service, @@ -226,7 +230,7 @@ def test_validate_password_too_many_failed(self): ) breach_service = pretend.stub() form = forms.LoginForm( - data={"username": "my_username"}, + formdata=MultiDict({"username": "my_username"}), request=request, user_service=user_service, breach_service=breach_service, @@ -268,7 +272,7 @@ def test_password_breached(self, monkeypatch): ) form = forms.LoginForm( - data={"password": "password"}, + MultiDict({"password": "password"}), request=request, user_service=user_service, breach_service=breach_service, @@ -302,7 +306,7 @@ def test_validate_password_ok_ip_banned(self): check_password=pretend.call_recorder(lambda pw, tags: False) ) form = forms.LoginForm( - data={"username": "my_username"}, + formdata=MultiDict({"username": "my_username"}), request=request, user_service=user_service, breach_service=breach_service, @@ -335,7 +339,7 @@ def test_validate_password_notok_ip_banned(self, db_session): ) breach_service = pretend.stub() form = forms.LoginForm( - data={"username": "my_username"}, + formdata=MultiDict({"username": "my_username"}), request=request, user_service=user_service, breach_service=breach_service, @@ -357,7 +361,7 @@ def test_create(self): breach_service = pretend.stub() form = forms.RegistrationForm( - data={}, + formdata=MultiDict(), user_service=user_service, recaptcha_service=recaptcha_service, breach_service=breach_service, @@ -368,7 +372,7 @@ def test_create(self): def test_password_confirm_required_error(self): form = forms.RegistrationForm( - data={"password_confirm": ""}, + formdata=MultiDict({"password_confirm": ""}), user_service=pretend.stub( 
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub()) ), @@ -384,7 +388,9 @@ def test_passwords_mismatch_error(self, pyramid_config): find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub()) ) form = forms.RegistrationForm( - data={"new_password": "password", "password_confirm": "mismatch"}, + formdata=MultiDict( + {"new_password": "password", "password_confirm": "mismatch"} + ), user_service=user_service, recaptcha_service=pretend.stub(enabled=True), breach_service=pretend.stub(check_password=lambda pw, tags=None: False), @@ -401,10 +407,12 @@ def test_passwords_match_success(self): find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub()) ) form = forms.RegistrationForm( - data={ - "new_password": "MyStr0ng!shPassword", - "password_confirm": "MyStr0ng!shPassword", - }, + formdata=MultiDict( + { + "new_password": "MyStr0ng!shPassword", + "password_confirm": "MyStr0ng!shPassword", + } + ), user_service=user_service, recaptcha_service=pretend.stub(enabled=True), breach_service=pretend.stub(check_password=lambda pw, tags=None: False), @@ -416,7 +424,7 @@ def test_passwords_match_success(self): def test_email_required_error(self): form = forms.RegistrationForm( - data={"email": ""}, + formdata=MultiDict({"email": ""}), user_service=pretend.stub( find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub()) ), @@ -430,7 +438,7 @@ def test_email_required_error(self): @pytest.mark.parametrize("email", ["bad", "foo][email protected]", "</body></html>"]) def test_invalid_email_error(self, pyramid_config, email): form = forms.RegistrationForm( - data={"email": email}, + formdata=MultiDict({"email": email}), user_service=pretend.stub( find_userid_by_email=pretend.call_recorder(lambda _: None) ), @@ -445,7 +453,7 @@ def test_invalid_email_error(self, pyramid_config, email): def test_exotic_email_success(self): form = forms.RegistrationForm( - data={"email": "[email protected]"}, + formdata=MultiDict({"email": "[email protected]"}), user_service=pretend.stub( find_userid_by_email=pretend.call_recorder(lambda _: None) ), @@ -458,7 +466,7 @@ def test_exotic_email_success(self): def test_email_exists_error(self, pyramid_config): form = forms.RegistrationForm( - data={"email": "[email protected]"}, + formdata=MultiDict({"email": "[email protected]"}), user_service=pretend.stub( find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub()) ), @@ -475,7 +483,7 @@ def test_email_exists_error(self, pyramid_config): def test_prohibited_email_error(self, pyramid_config): form = forms.RegistrationForm( - data={"email": "[email protected]"}, + formdata=MultiDict({"email": "[email protected]"}), user_service=pretend.stub( find_userid_by_email=pretend.call_recorder(lambda _: None) ), @@ -492,7 +500,7 @@ def test_prohibited_email_error(self, pyramid_config): def test_recaptcha_disabled(self): form = forms.RegistrationForm( - data={"g_recpatcha_response": ""}, + formdata=MultiDict({"g_recpatcha_response": ""}), user_service=pretend.stub(), recaptcha_service=pretend.stub( enabled=False, @@ -507,7 +515,7 @@ def test_recaptcha_disabled(self): def test_recaptcha_required_error(self): form = forms.RegistrationForm( - data={"g_recaptcha_response": ""}, + formdata=MultiDict({"g_recaptcha_response": ""}), user_service=pretend.stub(), recaptcha_service=pretend.stub( enabled=True, @@ -520,7 +528,7 @@ def test_recaptcha_required_error(self): def test_recaptcha_error(self): form = forms.RegistrationForm( - data={"g_recaptcha_response": "asd"}, + 
formdata=MultiDict({"g_recaptcha_response": "asd"}), user_service=pretend.stub(), recaptcha_service=pretend.stub( verify_response=pretend.raiser(recaptcha.RecaptchaError), @@ -533,7 +541,7 @@ def test_recaptcha_error(self): def test_username_exists(self, pyramid_config): form = forms.RegistrationForm( - data={"username": "foo"}, + formdata=MultiDict({"username": "foo"}), user_service=pretend.stub( find_userid=pretend.call_recorder(lambda name: 1), username_is_prohibited=lambda a: False, @@ -553,7 +561,7 @@ def test_username_exists(self, pyramid_config): def test_username_prohibted(self, pyramid_config): form = forms.RegistrationForm( - data={"username": "foo"}, + formdata=MultiDict({"username": "foo"}), user_service=pretend.stub( username_is_prohibited=lambda a: True, ), @@ -573,7 +581,7 @@ def test_username_prohibted(self, pyramid_config): @pytest.mark.parametrize("username", ["_foo", "bar_", "foo^bar"]) def test_username_is_valid(self, username, pyramid_config): form = forms.RegistrationForm( - data={"username": username}, + formdata=MultiDict({"username": username}), user_service=pretend.stub( find_userid=pretend.call_recorder(lambda _: None), username_is_prohibited=lambda a: False, @@ -601,7 +609,7 @@ def test_password_strength(self): ) for pwd, valid in cases: form = forms.RegistrationForm( - data={"new_password": pwd, "password_confirm": pwd}, + formdata=MultiDict({"new_password": pwd, "password_confirm": pwd}), user_service=pretend.stub(), recaptcha_service=pretend.stub( enabled=False, @@ -614,7 +622,7 @@ def test_password_strength(self): def test_password_breached(self): form = forms.RegistrationForm( - data={"new_password": "password"}, + formdata=MultiDict({"new_password": "password"}), user_service=pretend.stub( find_userid=pretend.call_recorder(lambda _: None) ), @@ -638,7 +646,7 @@ def test_password_breached(self): def test_name_too_long(self, pyramid_config): form = forms.RegistrationForm( - data={"full_name": "hello " * 50}, + formdata=MultiDict({"full_name": "hello " * 50}), user_service=pretend.stub( find_userid=pretend.call_recorder(lambda _: None) ), @@ -703,7 +711,7 @@ def test_validate_username_or_email_with_none(self): class TestResetPasswordForm: def test_password_confirm_required_error(self): form = forms.ResetPasswordForm( - data={"password_confirm": ""}, + formdata=MultiDict({"password_confirm": ""}), breach_service=pretend.stub(check_password=lambda pw, tags=None: False), ) @@ -712,13 +720,15 @@ def test_password_confirm_required_error(self): def test_passwords_mismatch_error(self, pyramid_config): form = forms.ResetPasswordForm( - data={ - "new_password": "password", - "password_confirm": "mismatch", - "username": "username", - "full_name": "full_name", - "email": "email", - }, + formdata=MultiDict( + { + "new_password": "password", + "password_confirm": "mismatch", + "username": "username", + "full_name": "full_name", + "email": "email", + } + ), breach_service=pretend.stub(check_password=lambda pw, tags=None: False), ) @@ -734,13 +744,15 @@ def test_passwords_mismatch_error(self, pyramid_config): ) def test_password_strength(self, password, expected): form = forms.ResetPasswordForm( - data={ - "new_password": password, - "password_confirm": password, - "username": "username", - "full_name": "full_name", - "email": "email", - }, + formdata=MultiDict( + { + "new_password": password, + "password_confirm": password, + "username": "username", + "full_name": "full_name", + "email": "email", + } + ), breach_service=pretend.stub(check_password=lambda pw, tags=None: 
False), ) @@ -748,13 +760,15 @@ def test_password_strength(self, password, expected): def test_passwords_match_success(self): form = forms.ResetPasswordForm( - data={ - "new_password": "MyStr0ng!shPassword", - "password_confirm": "MyStr0ng!shPassword", - "username": "username", - "full_name": "full_name", - "email": "email", - }, + formdata=MultiDict( + { + "new_password": "MyStr0ng!shPassword", + "password_confirm": "MyStr0ng!shPassword", + "username": "username", + "full_name": "full_name", + "email": "email", + } + ), breach_service=pretend.stub(check_password=lambda pw, tags=None: False), ) @@ -762,13 +776,15 @@ def test_passwords_match_success(self): def test_password_breached(self): form = forms.ResetPasswordForm( - data={ - "new_password": "MyStr0ng!shPassword", - "password_confirm": "MyStr0ng!shPassword", - "username": "username", - "full_name": "full_name", - "email": "email", - }, + formdata=MultiDict( + { + "new_password": "MyStr0ng!shPassword", + "password_confirm": "MyStr0ng!shPassword", + "username": "username", + "full_name": "full_name", + "email": "email", + } + ), user_service=pretend.stub( find_userid=pretend.call_recorder(lambda _: None) ), @@ -803,8 +819,8 @@ def test_totp_secret_exists(self, pyramid_config): request = pretend.stub(remote_addr="1.2.3.4") form = forms.TOTPAuthenticationForm( + formdata=MultiDict({"totp_value": ""}), request=request, - data={"totp_value": ""}, user_id=pretend.stub(), user_service=pretend.stub(get_user=get_user), ) @@ -813,7 +829,7 @@ def test_totp_secret_exists(self, pyramid_config): form = forms.TOTPAuthenticationForm( request=request, - data={"totp_value": "not_a_real_value"}, + formdata=MultiDict({"totp_value": "not_a_real_value"}), user_id=pretend.stub(), user_service=pretend.stub( check_totp_value=lambda *a: True, get_user=get_user @@ -824,7 +840,7 @@ def test_totp_secret_exists(self, pyramid_config): form = forms.TOTPAuthenticationForm( request=request, - data={"totp_value": "1 2 3 4 5 6 7"}, + formdata=MultiDict({"totp_value": "1 2 3 4 5 6 7"}), user_id=pretend.stub(), user_service=pretend.stub( check_totp_value=lambda *a: True, get_user=get_user @@ -838,8 +854,8 @@ def test_totp_secret_exists(self, pyramid_config): get_user=get_user, ) form = forms.TOTPAuthenticationForm( + formdata=MultiDict({"totp_value": "123456"}), request=request, - data={"totp_value": "123456"}, user_id=1, user_service=user_service, ) @@ -855,8 +871,8 @@ def test_totp_secret_exists(self, pyramid_config): ] form = forms.TOTPAuthenticationForm( + formdata=MultiDict({"totp_value": "123456"}), request=request, - data={"totp_value": "123456"}, user_id=pretend.stub(), user_service=pretend.stub( check_totp_value=lambda *a: True, get_user=get_user @@ -866,7 +882,7 @@ def test_totp_secret_exists(self, pyramid_config): form = forms.TOTPAuthenticationForm( request=request, - data={"totp_value": " 1 2 3 4 5 6 "}, + formdata=MultiDict({"totp_value": " 1 2 3 4 5 6 "}), user_id=pretend.stub(), user_service=pretend.stub( check_totp_value=lambda *a: True, get_user=get_user @@ -876,7 +892,7 @@ def test_totp_secret_exists(self, pyramid_config): form = forms.TOTPAuthenticationForm( request=request, - data={"totp_value": "123 456"}, + formdata=MultiDict({"totp_value": "123 456"}), user_id=pretend.stub(), user_service=pretend.stub( check_totp_value=lambda *a: True, get_user=get_user @@ -1006,8 +1022,8 @@ def test_creation(self): def test_missing_value(self): request = pretend.stub() form = forms.RecoveryCodeAuthenticationForm( + formdata=MultiDict({"recovery_code_value": ""}), 
request=request, - data={"recovery_code_value": ""}, user_id=pretend.stub(), user_service=pretend.stub(), ) @@ -1038,8 +1054,8 @@ def test_invalid_recovery_code( get_user=pretend.call_recorder(lambda userid: user), ) form = forms.RecoveryCodeAuthenticationForm( + formdata=MultiDict({"recovery_code_value": "deadbeef00001111"}), request=request, - data={"recovery_code_value": "deadbeef00001111"}, user_id=1, user_service=user_service, ) @@ -1059,8 +1075,8 @@ def test_valid_recovery_code(self, monkeypatch): request = pretend.stub(remote_addr="1.2.3.4") user = pretend.stub(id=pretend.stub(), username="foobar") form = forms.RecoveryCodeAuthenticationForm( + formdata=MultiDict({"recovery_code_value": "deadbeef00001111"}), request=request, - data={"recovery_code_value": "deadbeef00001111"}, user_id=pretend.stub(), user_service=pretend.stub( check_recovery_code=pretend.call_recorder(lambda *a, **kw: True), @@ -1095,7 +1111,7 @@ def test_recovery_code_string_validation( user = pretend.stub(id=pretend.stub(), username="foobar") form = forms.RecoveryCodeAuthenticationForm( request=request, - data={"recovery_code_value": input_string}, + formdata=MultiDict({"recovery_code_value": input_string}), user_id=pretend.stub(), user_service=pretend.stub( check_recovery_code=pretend.call_recorder(lambda *a, **kw: True), diff --git a/tests/unit/admin/views/test_banners.py b/tests/unit/admin/views/test_banners.py --- a/tests/unit/admin/views/test_banners.py +++ b/tests/unit/admin/views/test_banners.py @@ -218,13 +218,13 @@ def test_preview_banner(self, db_request): class TestBannerForm: def test_required_fields(self, banner_data): - form = views.BannerForm(data={}) + form = views.BannerForm(formdata=MultiDict()) assert form.validate() is False assert set(form.errors) == set(banner_data) def test_valid_data(self, banner_data): - form = views.BannerForm(data=banner_data) + form = views.BannerForm(formdata=MultiDict(banner_data)) assert form.validate() is True data = form.data defaults = { @@ -232,4 +232,7 @@ def test_valid_data(self, banner_data): "active": False, "link_label": Banner.DEFAULT_BTN_LABEL, } + + # Mash the `end` into a date object to match the form's coerced result. 
+ banner_data["end"] = datetime.date.fromisoformat(banner_data["end"]) assert data == {**banner_data, **defaults} diff --git a/tests/unit/admin/views/test_sponsors.py b/tests/unit/admin/views/test_sponsors.py --- a/tests/unit/admin/views/test_sponsors.py +++ b/tests/unit/admin/views/test_sponsors.py @@ -260,29 +260,29 @@ def test_required_fields(self): assert field in form.errors def test_valid_data(self): - form = views.SponsorForm(data=self.data) + form = views.SponsorForm(formdata=MultiDict(self.data)) assert form.validate() is True def test_white_logo_is_required_for_footer_display(self): self.data["footer"] = True # don't validate without logo - form = views.SponsorForm(data=self.data) + form = views.SponsorForm(formdata=MultiDict(self.data)) assert form.validate() is False assert "white_logo" in form.errors self.data["white_logo_url"] = "http://domain.com/white-logo.jpg" - form = views.SponsorForm(data=self.data) + form = views.SponsorForm(formdata=MultiDict(self.data)) assert form.validate() is True def test_white_logo_is_required_for_infra_display(self): self.data["infra_sponsor"] = True # don't validate without logo - form = views.SponsorForm(data=self.data) + form = views.SponsorForm(formdata=MultiDict(self.data)) assert form.validate() is False assert "white_logo" in form.errors self.data["white_logo_url"] = "http://domain.com/white-logo.jpg" - form = views.SponsorForm(data=self.data) + form = views.SponsorForm(formdata=MultiDict(self.data)) assert form.validate() is True diff --git a/tests/unit/manage/test_forms.py b/tests/unit/manage/test_forms.py --- a/tests/unit/manage/test_forms.py +++ b/tests/unit/manage/test_forms.py @@ -57,7 +57,7 @@ def test_validate_username_with_user(self): [ ("", "Select role"), ("invalid", "Not a valid choice."), - (None, "Not a valid choice."), + (None, "Select role"), ], ) def test_validate_role_name_fails(self, value, expected): @@ -81,7 +81,7 @@ def test_creation(self): def test_email_exists_error(self, pyramid_config): user_id = pretend.stub() form = forms.AddEmailForm( - data={"email": "[email protected]"}, + formdata=MultiDict({"email": "[email protected]"}), user_id=user_id, user_service=pretend.stub(find_userid_by_email=lambda _: user_id), ) @@ -95,7 +95,7 @@ def test_email_exists_error(self, pyramid_config): def test_email_exists_other_account_error(self, pyramid_config): form = forms.AddEmailForm( - data={"email": "[email protected]"}, + formdata=MultiDict({"email": "[email protected]"}), user_id=pretend.stub(), user_service=pretend.stub(find_userid_by_email=lambda _: pretend.stub()), ) @@ -109,7 +109,7 @@ def test_email_exists_other_account_error(self, pyramid_config): def test_prohibited_email_error(self, pyramid_config): form = forms.AddEmailForm( - data={"email": "[email protected]"}, + formdata=MultiDict({"email": "[email protected]"}), user_service=pretend.stub(find_userid_by_email=lambda _: None), user_id=pretend.stub(), ) @@ -148,7 +148,7 @@ def test_verify_totp_invalid(self, monkeypatch): monkeypatch.setattr(otp, "verify_totp", verify_totp) form = forms.ProvisionTOTPForm( - data={"totp_value": "123456"}, totp_secret=pretend.stub() + formdata=MultiDict({"totp_value": "123456"}), totp_secret=pretend.stub() ) assert not form.validate() assert form.totp_value.errors.pop() == "Invalid TOTP code. Try again?" 
@@ -158,7 +158,7 @@ def test_verify_totp_valid(self, monkeypatch): monkeypatch.setattr(otp, "verify_totp", verify_totp) form = forms.ProvisionTOTPForm( - data={"totp_value": "123456"}, totp_secret=pretend.stub() + formdata=MultiDict({"totp_value": "123456"}), totp_secret=pretend.stub() ) assert form.validate() @@ -182,10 +182,9 @@ def test_validate_confirm_password(self): ), ) form = forms.DeleteTOTPForm( - username="username", + formdata=MultiDict({"username": "username", "password": "password"}), request=request, user_service=user_service, - password="password", ) assert form.validate() @@ -218,7 +217,7 @@ def test_verify_assertion_invalid_json(self): ) form = forms.ProvisionWebAuthnForm( - data={"credential": "invalid json", "label": "fake label"}, + formdata=MultiDict({"credential": "invalid json", "label": "fake label"}), user_service=user_service, user_id=pretend.stub(), challenge=pretend.stub(), @@ -239,7 +238,7 @@ def test_verify_assertion_invalid(self): get_webauthn_by_label=pretend.call_recorder(lambda *a: None), ) form = forms.ProvisionWebAuthnForm( - data={"credential": "{}", "label": "fake label"}, + formdata=MultiDict({"credential": "{}", "label": "fake label"}), user_service=user_service, user_id=pretend.stub(), challenge=pretend.stub(), @@ -255,7 +254,7 @@ def test_verify_label_missing(self): verify_webauthn_credential=lambda *a, **kw: pretend.stub() ) form = forms.ProvisionWebAuthnForm( - data={"credential": "{}"}, + formdata=MultiDict({"credential": "{}"}), user_service=user_service, user_id=pretend.stub(), challenge=pretend.stub(), @@ -272,7 +271,7 @@ def test_verify_label_already_in_use(self): get_webauthn_by_label=pretend.call_recorder(lambda *a: pretend.stub()), ) form = forms.ProvisionWebAuthnForm( - data={"credential": "{}", "label": "fake label"}, + formdata=MultiDict({"credential": "{}", "label": "fake label"}), user_service=user_service, user_id=pretend.stub(), challenge=pretend.stub(), @@ -290,7 +289,7 @@ def test_creates_validated_credential(self): get_webauthn_by_label=pretend.call_recorder(lambda *a: None), ) form = forms.ProvisionWebAuthnForm( - data={"credential": "{}", "label": "fake label"}, + formdata=MultiDict({"credential": "{}", "label": "fake label"}), user_service=user_service, user_id=pretend.stub(), challenge=pretend.stub(), @@ -323,7 +322,7 @@ def test_validate_label_not_in_use(self): get_webauthn_by_label=pretend.call_recorder(lambda *a: None) ) form = forms.DeleteWebAuthnForm( - data={"label": "fake label"}, + formdata=MultiDict({"label": "fake label"}), user_service=user_service, user_id=pretend.stub(), ) @@ -337,7 +336,7 @@ def test_creates_webauthn_attribute(self): get_webauthn_by_label=pretend.call_recorder(lambda *a: fake_webauthn) ) form = forms.DeleteWebAuthnForm( - data={"label": "fake label"}, + formdata=MultiDict({"label": "fake label"}), user_service=user_service, user_id=pretend.stub(), ) @@ -363,7 +362,7 @@ def test_creation(self): def test_validate_description_missing(self): form = forms.CreateMacaroonForm( - data={"token_scope": "scope:user"}, + formdata=MultiDict({"token_scope": "scope:user"}), user_id=pretend.stub(), macaroon_service=pretend.stub(), project_names=pretend.stub(), @@ -374,7 +373,7 @@ def test_validate_description_missing(self): def test_validate_description_in_use(self): form = forms.CreateMacaroonForm( - data={"description": "dummy", "token_scope": "scope:user"}, + formdata=MultiDict({"description": "dummy", "token_scope": "scope:user"}), user_id=pretend.stub(), macaroon_service=pretend.stub( 
get_macaroon_by_description=lambda *a: pretend.stub() @@ -387,7 +386,7 @@ def test_validate_description_in_use(self): def test_validate_token_scope_missing(self): form = forms.CreateMacaroonForm( - data={"description": "dummy"}, + formdata=MultiDict({"description": "dummy"}), user_id=pretend.stub(), macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None), project_names=pretend.stub(), @@ -398,7 +397,9 @@ def test_validate_token_scope_missing(self): def test_validate_token_scope_unspecified(self): form = forms.CreateMacaroonForm( - data={"description": "dummy", "token_scope": "scope:unspecified"}, + formdata=MultiDict( + {"description": "dummy", "token_scope": "scope:unspecified"} + ), user_id=pretend.stub(), macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None), project_names=pretend.stub(), @@ -412,7 +413,7 @@ def test_validate_token_scope_unspecified(self): ) def test_validate_token_scope_invalid_format(self, scope): form = forms.CreateMacaroonForm( - data={"description": "dummy", "token_scope": scope}, + formdata=MultiDict({"description": "dummy", "token_scope": scope}), user_id=pretend.stub(), macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None), project_names=pretend.stub(), @@ -423,7 +424,9 @@ def test_validate_token_scope_invalid_format(self, scope): def test_validate_token_scope_invalid_project(self): form = forms.CreateMacaroonForm( - data={"description": "dummy", "token_scope": "scope:project:foo"}, + formdata=MultiDict( + {"description": "dummy", "token_scope": "scope:project:foo"} + ), user_id=pretend.stub(), macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None), project_names=["bar"], @@ -434,7 +437,7 @@ def test_validate_token_scope_invalid_project(self): def test_validate_token_scope_valid_user(self): form = forms.CreateMacaroonForm( - data={"description": "dummy", "token_scope": "scope:user"}, + formdata=MultiDict({"description": "dummy", "token_scope": "scope:user"}), user_id=pretend.stub(), macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None), project_names=pretend.stub(), @@ -444,7 +447,9 @@ def test_validate_token_scope_valid_user(self): def test_validate_token_scope_valid_project(self): form = forms.CreateMacaroonForm( - data={"description": "dummy", "token_scope": "scope:project:foo"}, + formdata=MultiDict( + {"description": "dummy", "token_scope": "scope:project:foo"} + ), user_id=pretend.stub(), macaroon_service=pretend.stub(get_macaroon_by_description=lambda *a: None), project_names=["foo"], @@ -478,7 +483,7 @@ def test_validate_macaroon_id_invalid(self): remote_addr="1.2.3.4", banned=pretend.stub(by_ip=lambda ip_address: False) ) form = forms.DeleteMacaroonForm( - data={"macaroon_id": pretend.stub(), "password": "password"}, + formdata=MultiDict({"macaroon_id": pretend.stub(), "password": "password"}), request=request, macaroon_service=macaroon_service, user_service=user_service, @@ -499,10 +504,15 @@ def test_validate_macaroon_id(self): remote_addr="1.2.3.4", banned=pretend.stub(by_ip=lambda ip_address: False) ) form = forms.DeleteMacaroonForm( - data={"macaroon_id": pretend.stub(), "password": "password"}, + formdata=MultiDict( + { + "macaroon_id": pretend.stub(), + "username": "username", + "password": "password", + } + ), request=request, macaroon_service=macaroon_service, - username="username", user_service=user_service, )
Chore: Forms: replace `DataRequired` with `InputRequired`

A bunch of the Warehouse forms use `DataRequired` when they should probably be using `InputRequired`. The latter is stricter: it checks that raw form input was actually submitted, whereas `DataRequired` only checks the field's post-coercion data, so defaults and (probably unintended) input coercions can satisfy it.

Ref: https://wtforms.readthedocs.io/en/2.3.x/validators/#wtforms.validators.DataRequired

Assigning to myself for triage.
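The behavioral difference is easy to demonstrate in isolation. The following is a minimal sketch, not part of the PR: it assumes only stock `wtforms` plus `webob`'s `MultiDict` (the same wrapper the updated tests import), and the form and field names are made up for illustration.

```python
import wtforms
from webob.multidict import MultiDict
from wtforms import Form


class DataRequiredForm(Form):
    locale_id = wtforms.StringField(validators=[wtforms.validators.DataRequired()])


class InputRequiredForm(Form):
    locale_id = wtforms.StringField(validators=[wtforms.validators.InputRequired()])


# Nothing was submitted here; the value arrives via the `data` kwarg, which is
# the construction style the old tests used.
print(DataRequiredForm(data={"locale_id": "en"}).validate())   # True  (checks coerced field data)
print(InputRequiredForm(data={"locale_id": "en"}).validate())  # False (no raw input was submitted)

# With genuinely submitted form data, both validators are satisfied.
print(InputRequiredForm(formdata=MultiDict({"locale_id": "en"})).validate())  # True
```

This is also why the accompanying test changes swap `data={...}` for `formdata=MultiDict({...})`: under `InputRequired`, only real form input satisfies the validator.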
Triaging: @jleightcap will be working on this.

Does https://github.com/pypi/warehouse/pull/13696 need to be taken over?

@jleightcap is still working on it; I believe he was just busy today 🙂
2023-05-18T22:13:58Z
[]
[]
pypi/warehouse
13,706
pypi__warehouse-13706
[ "13705" ]
fec156fdaa6d81dd33615f926356ae39f2ee6166
diff --git a/warehouse/packaging/utils.py b/warehouse/packaging/utils.py --- a/warehouse/packaging/utils.py +++ b/warehouse/packaging/utils.py @@ -73,6 +73,9 @@ def _simple_detail(project, request): "data-dist-info-metadata": {"sha256": file.metadata_file_sha256_digest} if file.metadata_file_sha256_digest else False, + "core-metadata": {"sha256": file.metadata_file_sha256_digest} + if file.metadata_file_sha256_digest + else False, } for file in files ],
diff --git a/tests/unit/api/test_simple.py b/tests/unit/api/test_simple.py --- a/tests/unit/api/test_simple.py +++ b/tests/unit/api/test_simple.py @@ -272,6 +272,7 @@ def test_with_files_no_serial(self, db_request, content_type, renderer_override) "size": f.size, "upload-time": f.upload_time.isoformat() + "Z", "data-dist-info-metadata": False, + "core-metadata": False, } for f in files ], @@ -319,6 +320,7 @@ def test_with_files_with_serial(self, db_request, content_type, renderer_overrid "size": f.size, "upload-time": f.upload_time.isoformat() + "Z", "data-dist-info-metadata": False, + "core-metadata": False, } for f in files ], @@ -407,6 +409,9 @@ def test_with_files_with_version_multi_digit( } if f.metadata_file_sha256_digest is not None else False, + "core-metadata": {"sha256": "deadbeefdeadbeefdeadbeefdeadbeef"} + if f.metadata_file_sha256_digest is not None + else False, } for f in files ],
Wrong key name used for PEP 658 metadata files in the JSON index **Describe the bug** [PEP 691](https://peps.python.org/pep-0691/#project-detail) states that the key name for metadata files in the JSON index should be `dist-info-metadata`: > `dist-info-metadata`: An optional key that indicates that metadata for this file is available, via the same location as specified in [PEP 658](https://peps.python.org/pep-0658) (`{file_url}.metadata`). However, warehouse is providing it under the `data-dist-info-metadata` key instead: ``` $ curl -H 'Accept: application/vnd.pypi.simple.v1+json' https://pypi.org/simple/fluffy-server/ | jq .files [...] { "data-dist-info-metadata": { "sha256": "4db99543165cbdeef42ccb6257545911ccd7865d65e304e3e056f383a25f309c" }, "filename": "fluffy_server-1.39.2-py3-none-any.whl", [...] ``` This is causing pip to not use the metadata files as it is looking for the `dist-info-metadata` key only: https://github.com/pypa/pip/blob/f25f8fffbbd16fdb13a4f8977946afe9a3248453/src/pip/_internal/models/link.py#L265 **Additional context** There are two bugs discovered recently in pip which may make this tricky to fix: * https://github.com/pypa/pip/issues/12042 * https://github.com/pypa/pip/issues/12038 I believe if we simply fix the key name in pypi.org, it will break existing pip versions as it will cause users to encounter these bugs. It may be necessary to coordinate this fix with fixes to the above bugs in pip to avoid disruption?
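Until clients and indexes converge, a consumer has to cope with every spelling that has shipped: `data-dist-info-metadata` (what Warehouse originally emitted), `dist-info-metadata` (what PEP 691 specifies and pip reads), and `core-metadata` (the spelling the patch above adds alongside the old key). Below is a rough sketch of a tolerant client, assuming nothing beyond the standard library; this is illustrative, not pip's actual implementation.

```python
import json
import urllib.request


def metadata_hashes(file_entry):
    """Return the metadata-file hash dict (or True) for a PEP 691 file entry,
    or None if no PEP 658 metadata file is advertised."""
    for key in ("core-metadata", "dist-info-metadata", "data-dist-info-metadata"):
        if key in file_entry:
            # A value of `false` means "no metadata file available".
            return file_entry[key] or None
    return None


request = urllib.request.Request(
    "https://pypi.org/simple/fluffy-server/",
    headers={"Accept": "application/vnd.pypi.simple.v1+json"},
)
with urllib.request.urlopen(request) as response:
    index = json.load(response)

for entry in index["files"]:
    hashes = metadata_hashes(entry)
    if hashes:
        # Per PEP 658, the metadata itself lives at f"{entry['url']}.metadata".
        print(entry["filename"], hashes)
```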
Are there versions of pip currently using the incorrect name? This was just shipped last week: https://github.com/pypi/warehouse/pull/13649

I don't think any versions of pip are using the incorrect name. https://github.com/pypa/pip/commit/bad03ef931d9b3ff4f9e75f35f9c41f45839e2a1#diff-3919ca53335487395177edeffff8b60bd360f1ad18c95593287f6814b9ecefb8R249 is the commit which first introduced the support in pip, and it is using `dist-info-metadata` already. (It does reference `data-dist-info-metadata`, but that is for the PEP 658 HTML-based index and not the JSON index.)

In my testing I couldn't get pip to use the metadata files at all with pypi.org when using the JSON index. My guess is that the current key is not being widely used at the moment, since if it were, other people would probably have encountered the pip bugs I referenced above before I did.

Ah, I understand. For what it's worth, pip is not the only consumer of PEP 658 data via the JSON API. I suppose we _will_ need to coordinate with the pip team though. @pfmoore do you have thoughts on how you'd anticipate such coordination happening?

See the discussion in https://github.com/pypa/pip/issues/12042

Specifically, as `dist-info-metadata` is optional, I think it should be omitted unless there's an actual file (rather than including it with a value of `false`). And the backfill to existing wheels shouldn't be started until we have a fixed pip released, and people have had a chance to upgrade.
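The omission being suggested is a small serializer-side change. Here is a minimal sketch of that alternative, using a hypothetical `file_entry` helper (Warehouse's actual `_simple_detail`, patched above, keeps the explicit `false` form):

```python
def file_entry(filename, url, sha256, metadata_sha256=None):
    """Build one PEP 691 file entry, omitting the optional metadata key
    entirely when no metadata file exists, instead of emitting `false`."""
    entry = {"filename": filename, "url": url, "hashes": {"sha256": sha256}}
    if metadata_sha256:
        entry["core-metadata"] = {"sha256": metadata_sha256}
    return entry


# No metadata file: the key is simply absent, so clients that treat the key's
# mere presence as meaningful are not misled.
print(file_entry("pkg-1.0.tar.gz", "https://example.invalid/pkg-1.0.tar.gz", "abc123"))
```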
2023-05-21T12:22:08Z
[]
[]
pypi/warehouse
13,707
pypi__warehouse-13707
[ "12629" ]
ebe1da7043398ae236357c4b786ec870f9d8b2c9
diff --git a/warehouse/email/__init__.py b/warehouse/email/__init__.py --- a/warehouse/email/__init__.py +++ b/warehouse/email/__init__.py @@ -320,6 +320,11 @@ def send_basic_auth_with_two_factor_email(request, user, *, project_name): return {"project_name": project_name} +@_email("gpg-signature-uploaded", repeat_window=datetime.timedelta(days=1)) +def send_gpg_signature_uploaded_email(request, user, *, project_name): + return {"project_name": project_name} + + @_email("account-deleted") def send_account_deletion_email(request, user): return {"username": user.username} diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py --- a/warehouse/forklift/legacy.py +++ b/warehouse/forklift/legacy.py @@ -48,7 +48,10 @@ from warehouse import forms from warehouse.admin.flags import AdminFlagValue from warehouse.classifiers.models import Classifier -from warehouse.email import send_basic_auth_with_two_factor_email +from warehouse.email import ( + send_basic_auth_with_two_factor_email, + send_gpg_signature_uploaded_email, +) from warehouse.events.tags import EventTag from warehouse.metrics import IMetricsService from warehouse.packaging.interfaces import IFileStorage, IProjectService @@ -812,6 +815,9 @@ def _extract_wheel_metadata(path): has_translations=True, ) def file_upload(request): + # This is a list of warnings that we'll emit *IF* the request is successful. + warnings = [] + # If we're in read-only mode, let upload clients know if request.flags.enabled(AdminFlagValue.READ_ONLY): raise _exc_with_message( @@ -885,11 +891,10 @@ def file_upload(request): raise _exc_with_message(HTTPBadRequest, "Unknown protocol version.") # Check if any fields were supplied as a tuple and have become a - # FieldStorage. The 'content' and 'gpg_signature' fields _should_ be a - # FieldStorage, however. + # FieldStorage. The 'content' field _should_ be a FieldStorage, however. # ref: https://github.com/pypi/warehouse/issues/2185 # ref: https://github.com/pypi/warehouse/issues/2491 - for field in set(request.POST) - {"content", "gpg_signature"}: + for field in set(request.POST) - {"content"}: values = request.POST.getall(field) if any(isinstance(value, FieldStorage) for value in values): raise _exc_with_message(HTTPBadRequest, f"{field}: Should not be a tuple.") @@ -1139,6 +1144,15 @@ def file_upload(request): ) request.db.add(release) + if "gpg_signature" in request.POST: + warnings.append( + "GPG signature support has been removed from PyPI and the " + "provided signature has been discarded." + ) + send_gpg_signature_uploaded_email( + request, request.user, project_name=project.name + ) + # TODO: This should be handled by some sort of database trigger or # a SQLAlchemy hook or the like instead of doing it inline in # this view. @@ -1357,28 +1371,6 @@ def file_upload(request): k: h.hexdigest().lower() for k, h in metadata_file_hashes.items() } - # Also buffer the entire signature file to disk. 
- if "gpg_signature" in request.POST: - has_signature = True - with open(os.path.join(tmpdir, filename + ".asc"), "wb") as fp: - signature_size = 0 - for chunk in iter( - lambda: request.POST["gpg_signature"].file.read(8096), b"" - ): - signature_size += len(chunk) - if signature_size > MAX_SIGSIZE: - raise _exc_with_message(HTTPBadRequest, "Signature too large.") - fp.write(chunk) - - # Check whether signature is ASCII armored - with open(os.path.join(tmpdir, filename + ".asc"), "rb") as fp: - if not fp.read().startswith(b"-----BEGIN PGP SIGNATURE-----"): - raise _exc_with_message( - HTTPBadRequest, "PGP signature isn't ASCII armored." - ) - else: - has_signature = False - # TODO: This should be handled by some sort of database trigger or a # SQLAlchemy hook or the like instead of doing it inline in this # view. @@ -1392,7 +1384,6 @@ def file_upload(request): packagetype=form.filetype.data, comment_text=form.comment.data, size=file_size, - has_signature=bool(has_signature), md5_digest=file_hashes["md5"], sha256_digest=file_hashes["sha256"], blake2_256_digest=file_hashes["blake2_256"], @@ -1461,17 +1452,7 @@ def file_upload(request): "python-version": file_.python_version, }, ) - if has_signature: - storage.store( - file_.pgp_path, - os.path.join(tmpdir, filename + ".asc"), - meta={ - "project": file_.release.project.normalized_name, - "version": file_.release.version, - "package-type": file_.packagetype, - "python-version": file_.python_version, - }, - ) + if metadata_file_hashes: storage.store( file_.metadata_path, @@ -1521,7 +1502,7 @@ def file_upload(request): "packagetype": file_data.packagetype, "comment_text": file_data.comment_text, "size": file_data.size, - "has_signature": file_data.has_signature, + "has_signature": False, "md5_digest": file_data.md5_digest, "sha256_digest": file_data.sha256_digest, "blake2_256_digest": file_data.blake2_256_digest, @@ -1538,7 +1519,8 @@ def file_upload(request): # Dispatch our task to sync this to cache as soon as possible request.task(sync_file_to_cache).delay(file_.id) - return Response() + # Return any warnings that we've accumulated as the response body. + return Response("\n".join(warnings)) def _legacy_purge(status, *args, **kwargs): diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py --- a/warehouse/legacy/api/json.py +++ b/warehouse/legacy/api/json.py @@ -81,7 +81,9 @@ def _json_data(request, project, release, *, all_releases): "filename": f.filename, "packagetype": f.packagetype, "python_version": f.python_version, - "has_sig": f.has_signature, + # TODO: Remove this once we've had a long enough time with it + # here to consider it no longer in use. + "has_sig": False, "comment_text": f.comment_text, "md5_digest": f.md5_digest, "digests": { diff --git a/warehouse/legacy/api/xmlrpc/views.py b/warehouse/legacy/api/xmlrpc/views.py --- a/warehouse/legacy/api/xmlrpc/views.py +++ b/warehouse/legacy/api/xmlrpc/views.py @@ -402,7 +402,9 @@ def release_urls(request, package_name: str, version: str): "md5_digest": f.md5_digest, "sha256_digest": f.sha256_digest, "digests": {"md5": f.md5_digest, "sha256": f.sha256_digest}, - "has_sig": f.has_signature, + # TODO: Remove this once we've had a long enough time with it + # here to consider it no longer in use. 
+ "has_sig": False, "upload_time": f.upload_time.isoformat() + "Z", "upload_time_iso_8601": f.upload_time.isoformat() + "Z", "comment_text": f.comment_text, diff --git a/warehouse/packaging/models.py b/warehouse/packaging/models.py --- a/warehouse/packaging/models.py +++ b/warehouse/packaging/models.py @@ -663,7 +663,6 @@ def __table_args__(cls): # noqa filename = Column(Text, unique=True) path = Column(Text, unique=True, nullable=False) size = Column(Integer) - has_signature = Column(Boolean) md5_digest = Column(Text, unique=True, nullable=False) sha256_digest = Column(CIText, unique=True, nullable=False) blake2_256_digest = Column(CIText, unique=True, nullable=False) @@ -692,14 +691,6 @@ def __table_args__(cls): # noqa server_default=sql.false(), ) - @hybrid_property - def pgp_path(self): - return self.path + ".asc" - - @pgp_path.expression # type: ignore - def pgp_path(self): - return func.concat(self.path, ".asc") - @hybrid_property def metadata_path(self): return self.path + ".metadata" diff --git a/warehouse/packaging/tasks.py b/warehouse/packaging/tasks.py --- a/warehouse/packaging/tasks.py +++ b/warehouse/packaging/tasks.py @@ -52,8 +52,6 @@ def sync_file_to_cache(request, file_id): _copy_file_to_cache(archive_storage, cache_storage, file.path) if file.metadata_file_sha256_digest is not None: _copy_file_to_cache(archive_storage, cache_storage, file.metadata_path) - if file.has_signature: - _copy_file_to_cache(archive_storage, cache_storage, file.pgp_path) file.cached = True @@ -70,7 +68,7 @@ def check_file_cache_tasks_outstanding(request): ) -Checksums = namedtuple("Checksums", ["file", "metadata_file", "pgp_file"]) +Checksums = namedtuple("Checksums", ["file", "metadata_file"]) def fetch_checksums(storage, file): @@ -84,12 +82,7 @@ def fetch_checksums(storage, file): except FileNotFoundError: file_metadata_checksum = None - try: - file_signature_checksum = storage.get_checksum(file.pgp_path) - except FileNotFoundError: - file_signature_checksum = None - - return Checksums(file_checksum, file_metadata_checksum, file_signature_checksum) + return Checksums(file_checksum, file_metadata_checksum) @tasks.task(ignore_results=True, acks_late=True) @@ -109,12 +102,11 @@ def reconcile_file_storages(request): archive_checksums = fetch_checksums(archive_storage, file) cache_checksums = fetch_checksums(cache_storage, file) - # Note: We don't store md5 digest for METADATA file or pgp signature - # in our database, record boolean for if we should expect values. + # Note: We don't store md5 digest for METADATA file in our database, + # record boolean for if we should expect values. 
expected_checksums = Checksums( file.md5_digest, bool(file.metadata_file_sha256_digest), - bool(file.has_signature), ) if ( @@ -124,7 +116,6 @@ def reconcile_file_storages(request): bool(archive_checksums.metadata_file) == expected_checksums.metadata_file ) - and (bool(archive_checksums.pgp_file) == expected_checksums.pgp_file) ): logger.info(f" File<{file.id}> ({file.path}) is all good ✨") file.cached = True @@ -186,32 +177,6 @@ def reconcile_file_storages(request): ) errors.append(file.metadata_path) - if expected_checksums.pgp_file and ( - archive_checksums.pgp_file is not None - and cache_checksums.pgp_file is None - ): - # The only file we have is in archive, so use that for cache - _copy_file_to_cache(archive_storage, cache_storage, file.pgp_path) - logger.info( - f" File<{file.id}> pgp signature ({file.pgp_path}) " - "pulled from archive ⬆️" - ) - metrics.increment("warehouse.filestorage.reconciled", tags=["type:pgp"]) - elif expected_checksums.pgp_file: - if archive_checksums.pgp_file == cache_checksums.pgp_file: - logger.info( - f" File<{file.id}> pgp signature ({file.pgp_path}) is ok ✅" - ) - else: - metrics.increment( - "warehouse.filestorage.unreconciled", tags=["type:pgp"] - ) - logger.error( - f"Unable to reconcile stored File<{file.id}> pgp signature " - f"({file.pgp_path}) ❌" - ) - errors.append(file.pgp_path) - if len(errors) == 0: file.cached = True @@ -516,6 +481,7 @@ def populate_data_using_schema(file): row_data[sch.name] = list(field_data) else: row_data[sch.name] = field_data + row_data["has_signature"] = False return row_data for first, second in product("fedcba9876543210", repeat=2):
diff --git a/tests/unit/email/test_init.py b/tests/unit/email/test_init.py --- a/tests/unit/email/test_init.py +++ b/tests/unit/email/test_init.py @@ -1405,6 +1405,78 @@ def test_basic_auth_with_2fa_email( ] +class TestGPGSignatureUploadedEmail: + def test_gpg_signature_uploaded_email( + self, pyramid_request, pyramid_config, monkeypatch + ): + stub_user = pretend.stub( + id="id", + username="username", + name="", + email="[email protected]", + primary_email=pretend.stub(email="[email protected]", verified=True), + ) + subject_renderer = pyramid_config.testing_add_renderer( + "email/gpg-signature-uploaded/subject.txt" + ) + subject_renderer.string_response = "Email Subject" + body_renderer = pyramid_config.testing_add_renderer( + "email/gpg-signature-uploaded/body.txt" + ) + body_renderer.string_response = "Email Body" + html_renderer = pyramid_config.testing_add_renderer( + "email/gpg-signature-uploaded/body.html" + ) + html_renderer.string_response = "Email HTML Body" + + send_email = pretend.stub( + delay=pretend.call_recorder(lambda *args, **kwargs: None) + ) + pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email) + monkeypatch.setattr(email, "send_email", send_email) + + pyramid_request.db = pretend.stub( + query=lambda a: pretend.stub( + filter=lambda *a: pretend.stub( + one=lambda: pretend.stub(user_id=stub_user.id) + ) + ), + ) + pyramid_request.user = stub_user + pyramid_request.registry.settings = {"mail.sender": "[email protected]"} + project_name = "exampleproject" + result = email.send_gpg_signature_uploaded_email( + pyramid_request, stub_user, project_name=project_name + ) + + assert result == {"project_name": project_name} + assert pyramid_request.task.calls == [pretend.call(send_email)] + assert send_email.delay.calls == [ + pretend.call( + f"{stub_user.username} <{stub_user.email}>", + { + "subject": "Email Subject", + "body_text": "Email Body", + "body_html": ( + "<html>\n<head></head>\n" + "<body><p>Email HTML Body</p></body>\n</html>\n" + ), + }, + { + "tag": "account:email:sent", + "user_id": stub_user.id, + "additional": { + "from_": "[email protected]", + "to": stub_user.email, + "subject": "Email Subject", + "redact_ip": False, + }, + "ip_address": "1.2.3.4", + }, + ) + ] + + class TestAccountDeletionEmail: def test_account_deletion_email(self, pyramid_request, pyramid_config, monkeypatch): stub_user = pretend.stub( diff --git a/tests/unit/forklift/test_legacy.py b/tests/unit/forklift/test_legacy.py --- a/tests/unit/forklift/test_legacy.py +++ b/tests/unit/forklift/test_legacy.py @@ -1346,19 +1346,23 @@ def test_upload_escapes_nul_characters(self, pyramid_config, db_request): assert "\x00" not in db_request.POST["summary"] @pytest.mark.parametrize( - ("has_signature", "digests"), + ("digests",), [ - (True, {"md5_digest": _TAR_GZ_PKG_MD5}), - (True, {"sha256_digest": _TAR_GZ_PKG_SHA256}), - (False, {"md5_digest": _TAR_GZ_PKG_MD5}), - (False, {"sha256_digest": _TAR_GZ_PKG_SHA256}), + ({"md5_digest": _TAR_GZ_PKG_MD5},), + ({"sha256_digest": _TAR_GZ_PKG_SHA256},), + ({"md5_digest": _TAR_GZ_PKG_MD5},), + ({"sha256_digest": _TAR_GZ_PKG_SHA256},), ( - True, - {"md5_digest": _TAR_GZ_PKG_MD5, "sha256_digest": _TAR_GZ_PKG_SHA256}, + { + "md5_digest": _TAR_GZ_PKG_MD5, + "sha256_digest": _TAR_GZ_PKG_SHA256, + }, ), ( - False, - {"md5_digest": _TAR_GZ_PKG_MD5, "sha256_digest": _TAR_GZ_PKG_SHA256}, + { + "md5_digest": _TAR_GZ_PKG_MD5, + "sha256_digest": _TAR_GZ_PKG_SHA256, + }, ), ], ) @@ -1368,7 +1372,6 @@ def test_successful_upload( monkeypatch, 
pyramid_config, db_request, - has_signature, digests, metrics, ): @@ -1407,24 +1410,9 @@ def test_successful_upload( db_request.POST.extend([("classifiers", "Environment :: Other Environment")]) db_request.POST.update(digests) - if has_signature: - gpg_signature = FieldStorage() - gpg_signature.filename = filename + ".asc" - gpg_signature.file = io.BytesIO( - b"-----BEGIN PGP SIGNATURE-----\n" b" This is a Fake Signature" - ) - db_request.POST["gpg_signature"] = gpg_signature - assert isinstance(db_request.POST["gpg_signature"], FieldStorage) - @pretend.call_recorder def storage_service_store(path, file_path, *, meta): - if file_path.endswith(".asc"): - expected = ( - b"-----BEGIN PGP SIGNATURE-----\n" b" This is a Fake Signature" - ) - else: - expected = _TAR_GZ_PKG_TESTDATA - + expected = _TAR_GZ_PKG_TESTDATA with open(file_path, "rb") as fp: assert fp.read() == expected @@ -1446,7 +1434,7 @@ def storage_service_store(path, file_path, *, meta): pretend.call(IMetricsService, context=None), pretend.call(IFileStorage, name="archive"), ] - assert len(storage_service.store.calls) == 2 if has_signature else 1 + assert len(storage_service.store.calls) == 1 assert storage_service.store.calls[0] == pretend.call( "/".join( [ @@ -1465,25 +1453,6 @@ def storage_service_store(path, file_path, *, meta): }, ) - if has_signature: - assert storage_service.store.calls[1] == pretend.call( - "/".join( - [ - _TAR_GZ_PKG_STORAGE_HASH[:2], - _TAR_GZ_PKG_STORAGE_HASH[2:4], - _TAR_GZ_PKG_STORAGE_HASH[4:], - filename + ".asc", - ] - ), - mock.ANY, - meta={ - "project": project.normalized_name, - "version": release.version, - "package-type": "sdist", - "python-version": "source", - }, - ) - # Ensure that a File object has been created. uploaded_file = ( db_request.db.query(File) @@ -1684,44 +1653,6 @@ def test_upload_fails_for_second_sdist(self, pyramid_config, db_request): assert resp.status_code == 400 assert resp.status == "400 Only one sdist may be uploaded per release." - @pytest.mark.parametrize("sig", [b"lol nope"]) - def test_upload_fails_with_invalid_signature(self, pyramid_config, db_request, sig): - user = UserFactory.create() - pyramid_config.testing_securitypolicy(identity=user) - db_request.user = user - EmailFactory.create(user=user) - project = ProjectFactory.create() - release = ReleaseFactory.create(project=project, version="1.0") - RoleFactory.create(user=user, project=project) - - filename = f"{project.name}-{release.version}.tar.gz" - - db_request.POST = MultiDict( - { - "metadata_version": "1.2", - "name": project.name, - "version": release.version, - "filetype": "sdist", - "md5_digest": _TAR_GZ_PKG_MD5, - "content": pretend.stub( - filename=filename, - file=io.BytesIO(_TAR_GZ_PKG_TESTDATA), - type="application/tar", - ), - "gpg_signature": pretend.stub( - filename=filename + ".asc", file=io.BytesIO(sig) - ), - } - ) - - with pytest.raises(HTTPBadRequest) as excinfo: - legacy.file_upload(db_request) - - resp = excinfo.value - - assert resp.status_code == 400 - assert resp.status == "400 PGP signature isn't ASCII armored." 
- def test_upload_fails_with_invalid_classifier(self, pyramid_config, db_request): user = UserFactory.create() pyramid_config.testing_securitypolicy(identity=user) @@ -2209,44 +2140,6 @@ def test_upload_succeeds_custom_project_size_limit( ), ] - def test_upload_fails_with_too_large_signature(self, pyramid_config, db_request): - user = UserFactory.create() - pyramid_config.testing_securitypolicy(identity=user) - db_request.user = user - EmailFactory.create(user=user) - project = ProjectFactory.create() - release = ReleaseFactory.create(project=project, version="1.0") - RoleFactory.create(user=user, project=project) - - filename = f"{project.name}-{release.version}.tar.gz" - - db_request.POST = MultiDict( - { - "metadata_version": "1.2", - "name": project.name, - "version": release.version, - "filetype": "sdist", - "md5_digest": _TAR_GZ_PKG_MD5, - "content": pretend.stub( - filename=filename, - file=io.BytesIO(_TAR_GZ_PKG_TESTDATA), - type="application/tar", - ), - "gpg_signature": pretend.stub( - filename=filename + ".asc", - file=io.BytesIO(b"a" * (legacy.MAX_FILESIZE + 1)), - ), - } - ) - - with pytest.raises(HTTPBadRequest) as excinfo: - legacy.file_upload(db_request) - - resp = excinfo.value - - assert resp.status_code == 400 - assert resp.status == "400 Signature too large." - def test_upload_fails_with_previously_used_filename( self, pyramid_config, db_request ): @@ -3593,6 +3486,55 @@ def test_upload_succeeds_creates_project( ), ] + def test_upload_succeeds_with_signature( + self, pyramid_config, db_request, metrics, project_service, monkeypatch + ): + user = UserFactory.create() + EmailFactory.create(user=user) + + filename = "{}-{}.tar.gz".format("example", "1.0") + + pyramid_config.testing_securitypolicy(identity=user) + db_request.user = user + db_request.POST = MultiDict( + { + "metadata_version": "1.2", + "name": "example", + "version": "1.0", + "filetype": "sdist", + "md5_digest": _TAR_GZ_PKG_MD5, + "content": pretend.stub( + filename=filename, + file=io.BytesIO(_TAR_GZ_PKG_TESTDATA), + type="application/tar", + ), + "gpg_signature": "...", + } + ) + + storage_service = pretend.stub(store=lambda path, filepath, meta: None) + db_request.find_service = lambda svc, name=None, context=None: { + IFileStorage: storage_service, + IMetricsService: metrics, + IProjectService: project_service, + }.get(svc) + db_request.user_agent = "warehouse-tests/6.6.6" + + send_email = pretend.call_recorder(lambda *a, **kw: None) + monkeypatch.setattr(legacy, "send_gpg_signature_uploaded_email", send_email) + + resp = legacy.file_upload(db_request) + + assert resp.status_code == 200 + assert resp.body == ( + b"GPG signature support has been removed from PyPI and the provided " + b"signature has been discarded." 
+ ) + + assert send_email.calls == [ + pretend.call(db_request, user, project_name="example"), + ] + @pytest.mark.parametrize( ("emails_verified", "expected_success"), [ diff --git a/tests/unit/legacy/api/test_json.py b/tests/unit/legacy/api/test_json.py --- a/tests/unit/legacy/api/test_json.py +++ b/tests/unit/legacy/api/test_json.py @@ -195,7 +195,6 @@ def test_renders(self, pyramid_config, db_request, db_session): filename=f"{project.name}-{r.version}.tar.gz", python_version="source", size=200, - has_signature=True, ) for r in releases[1:] ] @@ -258,7 +257,7 @@ def test_renders(self, pyramid_config, db_request, db_session): "comment_text": None, "downloads": -1, "filename": files[0].filename, - "has_sig": True, + "has_sig": False, "md5_digest": files[0].md5_digest, "digests": { "md5": files[0].md5_digest, @@ -283,7 +282,7 @@ def test_renders(self, pyramid_config, db_request, db_session): "comment_text": None, "downloads": -1, "filename": files[1].filename, - "has_sig": True, + "has_sig": False, "md5_digest": files[1].md5_digest, "digests": { "md5": files[1].md5_digest, @@ -308,7 +307,7 @@ def test_renders(self, pyramid_config, db_request, db_session): "comment_text": None, "downloads": -1, "filename": files[2].filename, - "has_sig": True, + "has_sig": False, "md5_digest": files[2].md5_digest, "digests": { "blake2b_256": files[2].blake2_256_digest, @@ -334,7 +333,7 @@ def test_renders(self, pyramid_config, db_request, db_session): "comment_text": None, "downloads": -1, "filename": files[2].filename, - "has_sig": True, + "has_sig": False, "md5_digest": files[2].md5_digest, "digests": { "md5": files[2].md5_digest, @@ -503,7 +502,6 @@ def test_detail_renders(self, pyramid_config, db_request, db_session): filename=f"{project.name}-{r.version}.tar.gz", python_version="source", size=200, - has_signature=True, ) for r in releases[1:] ] @@ -565,7 +563,7 @@ def test_detail_renders(self, pyramid_config, db_request, db_session): "comment_text": None, "downloads": -1, "filename": files[-1].filename, - "has_sig": True, + "has_sig": False, "md5_digest": files[-1].md5_digest, "digests": { "md5": files[-1].md5_digest, @@ -595,7 +593,6 @@ def test_minimal_renders(self, pyramid_config, db_request): filename=f"{project.name}-{release.version}.tar.gz", python_version="source", size=200, - has_signature=True, ) user = UserFactory.create() @@ -656,7 +653,7 @@ def test_minimal_renders(self, pyramid_config, db_request): "comment_text": None, "downloads": -1, "filename": file.filename, - "has_sig": True, + "has_sig": False, "md5_digest": file.md5_digest, "digests": { "md5": file.md5_digest, diff --git a/tests/unit/legacy/api/xmlrpc/test_xmlrpc.py b/tests/unit/legacy/api/xmlrpc/test_xmlrpc.py --- a/tests/unit/legacy/api/xmlrpc/test_xmlrpc.py +++ b/tests/unit/legacy/api/xmlrpc/test_xmlrpc.py @@ -343,7 +343,7 @@ def test_release_urls(db_request): "md5_digest": file_.md5_digest, "sha256_digest": file_.sha256_digest, "digests": {"md5": file_.md5_digest, "sha256": file_.sha256_digest}, - "has_sig": file_.has_signature, + "has_sig": False, "upload_time": file_.upload_time.isoformat() + "Z", "upload_time_iso_8601": file_.upload_time.isoformat() + "Z", "comment_text": file_.comment_text, diff --git a/tests/unit/packaging/test_models.py b/tests/unit/packaging/test_models.py --- a/tests/unit/packaging/test_models.py +++ b/tests/unit/packaging/test_models.py @@ -550,7 +550,6 @@ def test_compute_paths(self, db_session): ) assert rfile.path == expected - assert rfile.pgp_path == expected + ".asc" assert rfile.metadata_path == 
expected + ".metadata" def test_query_paths(self, db_session): @@ -572,10 +571,10 @@ def test_query_paths(self, db_session): ) results = ( - db_session.query(File.path, File.pgp_path, File.metadata_path) + db_session.query(File.path, File.metadata_path) .filter(File.id == rfile.id) .limit(1) .one() ) - assert results == (expected, expected + ".asc", expected + ".metadata") + assert results == (expected, expected + ".metadata") diff --git a/tests/unit/packaging/test_tasks.py b/tests/unit/packaging/test_tasks.py --- a/tests/unit/packaging/test_tasks.py +++ b/tests/unit/packaging/test_tasks.py @@ -94,7 +94,6 @@ def mock_named_temporary_file(): def test_sync_file_to_cache_includes_bonus_files(db_request, monkeypatch, cached): file = FileFactory( cached=cached, - has_signature=True, metadata_file_sha256_digest="deadbeefdeadbeefdeadbeefdeadbeef", ) archive_stub = pretend.stub( @@ -128,17 +127,14 @@ def mock_named_temporary_file(): assert archive_stub.get_metadata.calls == [ pretend.call(file.path), pretend.call(file.metadata_path), - pretend.call(file.pgp_path), ] assert archive_stub.get.calls == [ pretend.call(file.path), pretend.call(file.metadata_path), - pretend.call(file.pgp_path), ] assert cache_stub.store.calls == [ pretend.call(file.path, "/tmp/wutang", meta={"fizz": "buzz"}), pretend.call(file.metadata_path, "/tmp/wutang", meta={"fizz": "buzz"}), - pretend.call(file.pgp_path, "/tmp/wutang", meta={"fizz": "buzz"}), ] else: assert archive_stub.get_metadata.calls == [] @@ -161,7 +157,6 @@ def test_fetch_checksums(): file_stub = pretend.stub( path="/path", metadata_path="/path.metadata", - pgp_path="/path.asc", ) storage_stub = pretend.stub( get_checksum=lambda pth: f"{pth}-deadbeef", @@ -170,7 +165,6 @@ def test_fetch_checksums(): assert warehouse.packaging.tasks.fetch_checksums(storage_stub, file_stub) == ( "/path-deadbeef", "/path.metadata-deadbeef", - "/path.asc-deadbeef", ) @@ -178,14 +172,12 @@ def test_fetch_checksums_none(): file_stub = pretend.stub( path="/path", metadata_path="/path.metadata", - pgp_path="/path.asc", ) storage_stub = pretend.stub(get_checksum=pretend.raiser(FileNotFoundError)) assert warehouse.packaging.tasks.fetch_checksums(storage_stub, file_stub) == ( None, None, - None, ) @@ -195,7 +187,6 @@ def test_reconcile_file_storages_all_good(db_request, metrics): all_good = FileFactory.create(release=release, cached=False) all_good.md5_digest = f"{all_good.path}-deadbeef" all_good.metadata_file_sha256_digest = f"{all_good.path}-feedbeef" - all_good.has_signature = True storage_service = pretend.stub(get_checksum=lambda pth: f"{pth}-deadbeef") db_request.find_service = pretend.call_recorder( @@ -221,7 +212,6 @@ def test_reconcile_file_storages_fixable(db_request, monkeypatch, metrics): fixable = FileFactory.create(release=release, cached=False) fixable.md5_digest = f"{fixable.path}-deadbeef" fixable.metadata_file_sha256_digest = f"{fixable.path}-feedbeef" - fixable.has_signature = True storage_service = pretend.stub(get_checksum=lambda pth: f"{pth}-deadbeef") broke_storage_service = pretend.stub(get_checksum=lambda pth: None) @@ -244,12 +234,10 @@ def test_reconcile_file_storages_fixable(db_request, monkeypatch, metrics): assert metrics.increment.calls == [ pretend.call("warehouse.filestorage.reconciled", tags=["type:dist"]), pretend.call("warehouse.filestorage.reconciled", tags=["type:metadata"]), - pretend.call("warehouse.filestorage.reconciled", tags=["type:pgp"]), ] assert copy_file.calls == [ pretend.call(storage_service, broke_storage_service, fixable.path), 
pretend.call(storage_service, broke_storage_service, fixable.metadata_path), - pretend.call(storage_service, broke_storage_service, fixable.pgp_path), ] assert fixable.cached is True @@ -268,10 +256,6 @@ def test_reconcile_file_storages_fixable(db_request, monkeypatch, metrics): ".metadata", "type:metadata", ), - ( - ".asc", - "type:pgp", - ), ], ) def test_reconcile_file_storages_borked( @@ -282,7 +266,6 @@ def test_reconcile_file_storages_borked( borked = FileFactory.create(release=release, cached=False) borked.md5_digest = f"{borked.path}-deadbeef" borked.metadata_file_sha256_digest = f"{borked.path}-feedbeef" - borked.has_signature = True storage_service = pretend.stub(get_checksum=lambda pth: f"{pth}-deadbeef") bad_storage_service = pretend.stub( @@ -323,10 +306,6 @@ def test_reconcile_file_storages_borked( ".metadata", "type:metadata", ), - ( - ".asc", - "type:pgp", - ), ], ) def test_not_all_files(db_request, monkeypatch, metrics, borked_ext, metrics_tag): @@ -570,7 +549,7 @@ def find_service(name=None): "packagetype": release_file.packagetype, "comment_text": release_file.comment_text, "size": release_file.size, - "has_signature": release_file.has_signature, + "has_signature": False, "md5_digest": release_file.md5_digest, "sha256_digest": release_file.sha256_digest, "blake2_256_digest": release_file.blake2_256_digest, @@ -628,7 +607,7 @@ def find_service(name=None): "python_version": release_file.python_version, "packagetype": release_file.packagetype, "comment_text": release_file.comment_text or None, - "has_signature": release_file.has_signature, + "has_signature": False, "md5_digest": release_file.md5_digest, "sha256_digest": release_file.sha256_digest, "blake2_256_digest": release_file.blake2_256_digest, @@ -774,7 +753,7 @@ def find_service(name=None): "python_version": release_file.python_version, "packagetype": release_file.packagetype, "comment_text": release_file.comment_text or None, - "has_signature": release_file.has_signature, + "has_signature": False, "md5_digest": release_file.md5_digest, "sha256_digest": release_file.sha256_digest, "blake2_256_digest": release_file.blake2_256_digest,
Upload API should accept signature files as separate requests **What's the problem this feature will solve?** Currently, signatures are required to be uploaded as part of the `POST` request for a given distribution file, rather than as separate files: https://github.com/pypi/warehouse/blob/3ceafd72904db0da0b980768876ebf680058fee6/warehouse/forklift/legacy.py#L1314-L1325 This leads to confusion when users are attempting to upload distribution files and signatures as separate invocations, such as https://github.com/pypa/twine/issues/931. **Describe the solution you'd like** The upload endpoint should accept signature files as separate requests, rather than as part of a `POST` request for a distribution file that it corresponds to. **Additional context** An 'upload 2.0' API is described in https://peps.python.org/pep-0694/, but has the same issue.
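To make the two request shapes concrete, here is a minimal sketch using `requests`. The combined form reflects the legacy upload API's real multipart fields (`:action`, `protocol_version`, `content`, `gpg_signature`); the signature-only request is hypothetical and only illustrates what this feature request asks for, and `metadata` stands in for the required distribution metadata fields.

```python
# Sketch only: authentication and most metadata fields are elided.
import requests

UPLOAD_URL = "https://upload.pypi.org/legacy/"

def upload_with_signature(dist_path, sig_path, metadata):
    # Current behavior: the .asc signature must ride along in the same
    # multipart POST as the distribution file it signs.
    with open(dist_path, "rb") as dist, open(sig_path, "rb") as sig:
        requests.post(
            UPLOAD_URL,
            data={":action": "file_upload", "protocol_version": "1", **metadata},
            files={"content": dist, "gpg_signature": sig},
        )

def upload_signature_only(sig_path, metadata):
    # Requested behavior (hypothetical): a later POST carrying only the
    # signature for a distribution file that was already uploaded.
    with open(sig_path, "rb") as sig:
        requests.post(
            UPLOAD_URL,
            data={":action": "file_upload", "protocol_version": "1", **metadata},
            files={"gpg_signature": sig},
        )
```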
2023-05-21T14:54:16Z
[]
[]
autogluon/autogluon
234
autogluon__autogluon-234
[ "233" ]
9d07958b43d3ffad2ff8ddee10a5d5228ca09099
diff --git a/autogluon/task/image_classification/classifier.py b/autogluon/task/image_classification/classifier.py --- a/autogluon/task/image_classification/classifier.py +++ b/autogluon/task/image_classification/classifier.py @@ -89,19 +89,21 @@ def save(self, checkpoint): state_dict = self.state_dict() save(state_dict, checkpoint) - def predict(self, X, input_size=224, plot=True): + def predict(self, X, input_size=224, crop_ratio=0.875, set_prob_thresh=0.001, plot=False): """Predict class-index and associated class probability for each image in a given dataset (or just a single image). Parameters ---------- - X : str or :class:`autogluon.task.ImageClassification.Dataset` + X : str or :class:`autogluon.task.ImageClassification.Dataset` or list of `autogluon.task.ImageClassification.Dataset` If str, should be path to the input image (when we just want to predict on single image). - Otherwise should be dataset of multiple images in same format as training dataset. + If class:`autogluon.task.ImageClassification.Dataset`, should be dataset of multiple images in same format as training dataset. + If list of `autogluon.task.ImageClassification.Dataset`, should be a set of test dataset with different scales of origin images. input_size : int Size of the images (pixels). plot : bool Whether to plot the image being classified. - + set_prob_thresh: float + Results with probability below threshold are set to 0 by default. Examples -------- >>> from autogluon import ImageClassification as task @@ -112,37 +114,90 @@ def predict(self, X, input_size=224, plot=True): >>> test_data = task.Dataset('~/data/test', train=False) >>> class_index, class_probability = classifier.predict('example.jpg') """ - # model inference + input_size = self.model.input_size if hasattr(self.model, 'input_size') else input_size - resize = int(math.ceil(input_size / 0.875)) - transform_fn = Compose([ - Resize(resize), - CenterCrop(input_size), - ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) - ]) - def predict_img(img): + resize = int(math.ceil(input_size / crop_ratio)) + + transform_size = transforms.Compose([ + transforms.Resize(resize), + transforms.CenterCrop(input_size), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]) + + def predict_img(img, ensemble=False): proba = self.predict_proba(img) - ind = mx.nd.argmax(proba, axis=1).astype('int') - idx = mx.nd.stack(mx.nd.arange(proba.shape[0], ctx=proba.context), ind.astype('float32')) - probai = mx.nd.gather_nd(proba, idx) - return ind, probai, proba + if ensemble: + return proba + else: + ind = mx.nd.argmax(proba, axis=1).astype('int') + idx = mx.nd.stack(mx.nd.arange(proba.shape[0], ctx=proba.context), ind.astype('float32')) + probai = mx.nd.gather_nd(proba, idx) + return ind, probai, proba + + def avg_prediction(different_dataset, threshold=0.001): + result = {} + inds, probas, probals_all = [], [], [] + for i in range(len(different_dataset)): + items = len(different_dataset[0]) + for j in range(items): + result.setdefault(j, []).append(different_dataset[i][j]) + for c in result.keys(): + proba_all = sum([*result[c]]) / len(different_dataset) + proba_all = mx.nd.array(np.where(proba_all.asnumpy() >= threshold, proba_all.asnumpy(), 0)) + ind = mx.nd.argmax(proba_all, axis=1).astype('int') + idx = mx.nd.stack(mx.nd.arange(proba_all.shape[0], ctx=proba_all.context), ind.astype('float32')) + proba = mx.nd.gather_nd(proba_all, idx) + inds.append(ind.asscalar()) + probas.append(proba.asnumpy()) + 
probals_all.append(proba_all.asnumpy().flatten()) + return inds, probas, probals_all + + def predict_imgs(X): + if isinstance(X, list): + different_dataset = [] + for i, x in enumerate(X): + proba_all_one_dataset = [] + tbar = tqdm(range(len(x.items))) + for j, x_item in enumerate(x): + tbar.update(1) + proba_all = predict_img(x_item[0], ensemble=True) + tbar.set_description('ratio:[%d],The input picture [%d]' % (i, j)) + proba_all_one_dataset.append(proba_all) + different_dataset.append(proba_all_one_dataset) + inds, probas, probals_all = avg_prediction(different_dataset, threshold=set_prob_thresh) + else: + inds, probas, probals_all = [], [], [] + tbar = tqdm(range(len(X.items))) + for i, x in enumerate(X): + tbar.update(1) + ind, proba, proba_all = predict_img(x[0]) + tbar.set_description('The input picture [%d] is classified as [%d], with probability %.2f ' % + (i, ind.asscalar(), proba.asscalar())) + inds.append(ind.asscalar()) + probas.append(proba.asnumpy()) + probals_all.append(proba_all.asnumpy().flatten()) + return inds, probas, probals_all + if isinstance(X, str) and os.path.isfile(X): - img = self.loader(X) + img = mx.image.imread(filename=X) if plot: - plt.imshow(np.array(img)) + plt.imshow(img.asnumpy()) plt.show() - img = transform_fn(img) + + img = transform_size(img) return predict_img(img) + if isinstance(X, AutoGluonObject): X = X.init() - inds, probas, probals_all = [], [],[] - for x in X: - ind, proba, proba_all= predict_img(x[0]) - inds.append(ind.asscalar()) - probas.append(proba.asnumpy()) - probals_all.append(proba_all.asnumpy().flatten()) - return inds, probas, probals_all + return predict_imgs(X) + + if isinstance(X, list) and len(X)>1: + X_group = [] + for X_item in X: + X_item = X_item.init() + X_group.append(X_item) + return predict_imgs(X_group) @staticmethod def loader(path): diff --git a/autogluon/task/image_classification/dataset.py b/autogluon/task/image_classification/dataset.py --- a/autogluon/task/image_classification/dataset.py +++ b/autogluon/task/image_classification/dataset.py @@ -43,39 +43,7 @@ def __call__(self, x, *args): return (self._fn(x),) + args return self._fn(x) -@func() -def get_dataset(path=None, train=True, name=None, - input_size=224, crop_ratio=0.875, jitter_param=0.4, - *args, **kwargs): - """ Method to produce image classification dataset for AutoGluon, can either be a - :class:`ImageFolderDataset`, :class:`RecordDataset`, or a - popular dataset already built into AutoGluon ('mnist', 'cifar10', 'cifar100', 'imagenet'). - - Parameters - ---------- - name : str, optional - Which built-in dataset to use, will override all other options if specified. - The options are ('mnist', 'cifar', 'cifar10', 'cifar100', 'imagenet') - train : bool, default = True - Whether this dataset should be used for training or validation. - path : str - The training data location. If using :class:`ImageFolderDataset`, - image folder`path/to/the/folder` should be provided. - If using :class:`RecordDataset`, the `path/to/*.rec` should be provided. - input_size : int - The input image size. - crop_ratio : float - Center crop ratio (for evaluation only) - - Returns - ------- - Dataset object that can be passed to `task.fit()`, which is actually an :class:`autogluon.space.AutoGluonObject`. - To interact with such an object yourself, you must first call `Dataset.init()` to instantiate the object in Python. 
- """ - resize = int(math.ceil(input_size / crop_ratio)) - if isinstance(name, str) and name.lower() in built_in_datasets: - return get_built_in_dataset(name, train=train, input_size=input_size, *args, **kwargs) - +def generate_transform(train, resize, _is_osx, input_size, jitter_param): if _is_osx: # using PIL to load image (slow) transform = Compose([ @@ -106,22 +74,71 @@ def get_dataset(path=None, train=True, name=None, transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) + return transform + + +@func() +def get_dataset(path=None, train=True, name=None, + input_size=224, crop_ratio=0.875, jitter_param=0.4, scale_ratio_choice=[], + *args, **kwargs): + """ Method to produce image classification dataset for AutoGluon, can either be a + :class:`ImageFolderDataset`, :class:`RecordDataset`, or a + popular dataset already built into AutoGluon ('mnist', 'cifar10', 'cifar100', 'imagenet'). + Parameters + ---------- + name : str, optional + Which built-in dataset to use, will override all other options if specified. + The options are ('mnist', 'cifar', 'cifar10', 'cifar100', 'imagenet') + train : bool, default = True + Whether this dataset should be used for training or validation. + path : str + The training data location. If using :class:`ImageFolderDataset`, + image folder`path/to/the/folder` should be provided. + If using :class:`RecordDataset`, the `path/to/*.rec` should be provided. + input_size : int + The input image size. + crop_ratio : float + Center crop ratio (for evaluation only) + scale_ratio_choice: list + List of crop_ratio, only in the test dataset, the set of scaling ratios obtained is scaled to the original image, and then cut a fixed size (input_size) and get a set of predictions for averaging. + + Returns + ------- + Dataset object that can be passed to `task.fit()`, which is actually an :class:`autogluon.space.AutoGluonObject`. + To interact with such an object yourself, you must first call `Dataset.init()` to instantiate the object in Python. 
+ """ + + resize = int(math.ceil(input_size / crop_ratio)) + transform = generate_transform(train, resize, _is_osx, input_size, jitter_param) + if isinstance(name, str) and name.lower() in built_in_datasets: + return get_built_in_dataset(name, train=train, input_size=input_size, *args, **kwargs) if '.rec' in path: dataset = RecordDataset(path, *args, transform=_TransformFirstClosure(transform), **kwargs) elif _is_osx: dataset = ImageFolderDataset(path, transform=transform, *args, **kwargs) elif not train: - dataset = TestImageFolderDataset(path, *args, - transform=_TransformFirstClosure(transform), **kwargs) + if scale_ratio_choice == []: + dataset = TestImageFolderDataset(path, *args, + transform=_TransformFirstClosure(transform), **kwargs) + else: + dataset = [] + for i in scale_ratio_choice: + resize = int(math.ceil(input_size / i)) + dataset_item = TestImageFolderDataset(path, *args, + transform=_TransformFirstClosure(generate_transform(train, resize, _is_osx, input_size, jitter_param)), **kwargs) + dataset.append(dataset_item.init()) + elif 'label_file' in kwargs: dataset = IndexImageDataset(path, transform=_TransformFirstClosure(transform), *args, **kwargs) else: dataset = NativeImageFolderDataset(path, *args, transform=_TransformFirstClosure(transform), **kwargs) - dataset = dataset.init() + + if scale_ratio_choice == []: + dataset = dataset.init() return dataset @obj() @@ -146,7 +163,7 @@ class IndexImageDataset(MXImageFolderDataset): def __init__(self, root, label_file, gray_scale=False, transform=None, extension='.jpg'): self._root = os.path.expanduser(root) - self.items = self.read_csv(label_file, root, extension) + self.items, self.synsets = self.read_csv(label_file, root, extension) self._flag = 0 if gray_scale else 1 self._transform = transform @@ -172,7 +189,15 @@ def label_to_index(label_list, name): for k, v in label_dict.items(): samples.append((os.path.join(root, f"{k}{extension}"), label_to_index(labels, v))) - return samples + return samples, labels + + @property + def num_classes(self): + return len(self.synsets) + + @property + def classes(self): + return self.synsets @property def num_classes(self): @@ -232,20 +257,33 @@ def __init__(self, root, gray_scale=False, transform=None): def _list_images(self, root): self.synsets = [] self.items = [] - - #for folder in sorted(os.listdir(root)): path = os.path.expanduser(root) if not os.path.isdir(path): raise ValueError('Ignoring %s, which is not a directory.'%path, stacklevel=3) - label = len(self.synsets) for filename in sorted(os.listdir(path)): filename = os.path.join(path, filename) - ext = os.path.splitext(filename)[1] - if ext.lower() not in self._exts: - warnings.warn('Ignoring %s of type %s. Only support %s'%( - filename, ext, ', '.join(self._exts))) - continue - self.items.append((filename, label)) + if os.path.isfile(filename): # add + label = len(self.synsets) + ext = os.path.splitext(filename)[1] + if ext.lower() not in self._exts: + warnings.warn('Ignoring %s of type %s. Only support %s'%( + filename, ext, ', '.join(self._exts))) + continue + self.items.append((filename, label)) + else: + folder = filename + if not os.path.isdir(folder): + raise ValueError('Ignoring %s, which is not a directory.'%path, stacklevel=3) + label = len(self.synsets) + for sub_filename in sorted(os.listdir(folder)): + sub_filename = os.path.join(folder, sub_filename) + ext = os.path.splitext(sub_filename)[1] + if ext.lower() not in self._exts: + warnings.warn('Ignoring %s of type %s. 
Only support %s'%( + sub_filename, ext, ', '.join(self._exts))) + continue + self.items.append((sub_filename, label)) + self.synsets.append(label) @property def num_classes(self): diff --git a/autogluon/task/image_classification/image_classification.py b/autogluon/task/image_classification/image_classification.py --- a/autogluon/task/image_classification/image_classification.py +++ b/autogluon/task/image_classification/image_classification.py @@ -59,9 +59,8 @@ def Dataset(*args, **kwargs): @staticmethod def fit(dataset, net=Categorical('ResNet50_v1b', 'ResNet18_v1b'), - optimizer= SGD(learning_rate=Real(1e-3, 1e-2, log=True), + optimizer= NAG(learning_rate=Real(1e-3, 1e-2, log=True), wd=Real(1e-4, 1e-3, log=True), multi_precision=False), - lr_scheduler='cosine', loss=SoftmaxCrossEntropyLoss(), split_ratio=0.8, batch_size=64, @@ -70,7 +69,7 @@ def fit(dataset, final_fit_epochs=None, ensemble=1, metric='accuracy', - nthreads_per_trial=4, + nthreads_per_trial=60, ngpus_per_trial=1, hybridize=True, search_strategy='random', @@ -93,14 +92,14 @@ def fit(dataset, warmup_lr=0.0, warmup_epochs=0), tricks=Dict( - last_gamma=False,#True - use_pretrained=False,#True + last_gamma=False, + use_pretrained=True, use_se=False, mixup=False, mixup_alpha=0.2, mixup_off_epoch= 0, - label_smoothing=False,#True - no_wd=False,#True + label_smoothing=False, + no_wd=False, teacher_name=None, temperature=20.0, hard_weight=0.5, @@ -121,8 +120,6 @@ def fit(dataset, Which existing neural network models to consider as candidates. optimizer : str or :class:`autogluon.space.AutoGluonObject` Which optimizers to consider as candidates for learning the neural network weights. - lr_scheduler : str - Describes how learning rate should be adjusted over the course of training. Options include: 'cosine', 'poly'. batch_size : int How many images to group in each mini-batch during gradient computations in training. epochs: int @@ -192,7 +189,7 @@ def fit(dataset, lr_config ---------- lr-mode : type=str, default='step'. - learning rate scheduler mode. options are step, poly and cosine. + describes how learning rate should be adjusted over the course of training. Options include: 'cosine', 'poly'. lr-decay : type=float, default=0.1. decay rate of learning rate. default is 0.1. lr-decay-period : type=int, default=0. 
@@ -247,7 +244,6 @@ def fit(dataset, dataset=dataset, net=net, optimizer=optimizer, - lr_scheduler=lr_scheduler, loss=loss, metric=metric, num_gpus=ngpus_per_trial, @@ -276,7 +272,7 @@ def fit(dataset, 'dist_ip_addrs': dist_ip_addrs, 'searcher': search_strategy, 'search_options': search_options, - 'plot_results': plot_results, + 'plot_results': plot_results } if search_strategy == 'hyperband': scheduler_options.update({ @@ -284,14 +280,14 @@ def fit(dataset, 'max_t': epochs, 'grace_period': grace_period if grace_period else epochs//4}) - results = BaseTask.run_fit(train_image_classification, search_strategy, - scheduler_options) + results = BaseTask.run_fit(train_image_classification, search_strategy, scheduler_options) args = sample_config(train_image_classification.args, results['best_config']) kwargs = {'num_classes': results['num_classes'], 'ctx': mx.cpu(0)} model = get_network(args.net, **kwargs) multi_precision = optimizer.kwvars['multi_precision'] if 'multi_precision' in optimizer.kwvars else False update_params(model, results.pop('model_params'), multi_precision) + if ensemble > 1: models = [model] if isinstance(search_strategy, str): diff --git a/autogluon/task/image_classification/nets.py b/autogluon/task/image_classification/nets.py --- a/autogluon/task/image_classification/nets.py +++ b/autogluon/task/image_classification/nets.py @@ -115,6 +115,7 @@ def _get_finetune_network(model_name, num_classes, ctx, **kwargs): finetune_net.fc.initialize(init.Xavier(), ctx=ctx) # initialize and context finetune_net.collect_params().reset_ctx(ctx) + # finetune_net.load_parameters(opt.resume_params, ctx=context, cast_dtype=True) finetune_net.hybridize() return finetune_net diff --git a/autogluon/task/image_classification/pipeline.py b/autogluon/task/image_classification/pipeline.py --- a/autogluon/task/image_classification/pipeline.py +++ b/autogluon/task/image_classification/pipeline.py @@ -36,8 +36,8 @@ def train_image_classification(args, reporter): args.net, args.tricks.teacher_name, args.tricks.hard_weight, - args.optimizer.multi_precision, args.hybridize, + args.optimizer.multi_precision, args.tricks.use_pretrained, args.tricks.use_gn, args.tricks.last_gamma, @@ -74,13 +74,13 @@ def teacher_prob(data): train_data, val_data, batch_fn, num_batches = \ get_data_loader(args.dataset, input_size, batch_size, args.num_workers, args.final_fit, args.split_ratio) - if isinstance(args.lr_config.lr_mode, str): + if isinstance(args.lr_config.lr_mode, str): # fix target_lr = LR_params(args.optimizer.lr, args.lr_config.lr_mode, args.epochs, num_batches, - args.lr_config.lr_decay_epoch, - args.lr_config.lr_decay , - args.lr_config.lr_decay_period, - args.lr_config.warmup_epochs, - args.lr_config.warmup_lr) + args.lr_config.lr_decay_epoch, + args.lr_config.lr_decay , + args.lr_config.lr_decay_period, + args.lr_config.warmup_epochs, + args.lr_config.warmup_lr) lr_scheduler = target_lr.get_lr_scheduler else: lr_scheduler = args.lr_config.lr_mode diff --git a/autogluon/task/image_classification/processing_params.py b/autogluon/task/image_classification/processing_params.py --- a/autogluon/task/image_classification/processing_params.py +++ b/autogluon/task/image_classification/processing_params.py @@ -23,12 +23,14 @@ def get_context(self): class Getmodel_kwargs(): - def __init__(self, context, + def __init__(self, + context, classes, - model_name, model_teacher, + model_name, + model_teacher, hard_weight, - multi_precision, hybridize, + multi_precision=False, use_pretrained=True, use_gn=False, 
last_gamma=False, @@ -57,27 +59,18 @@ def __init__(self, context, self._kwargs['last_gamma'] = True if self._model_teacher is not None and self._hard_weight < 1.0: - self._distillation = True + self.distillation = True else: - self._distillation = False + self.distillation = False - @property - def get_kwargs(self): - return self._kwargs - - @property - def distillation(self): - return self._distillation - @property def dtype(self): return self._dtype @property def get_teacher(self): - net_kwargs = self.get_kwargs - net = get_network(self._model_teacher, **net_kwargs) + net = get_network(self._model_teacher, **self._kwargs) net.cast(self._dtype) if self._hybridize: net.hybridize(static_alloc=True, static_shape=True) @@ -85,8 +78,7 @@ def get_teacher(self): @property def get_net(self): - net_kwargs = self.get_kwargs - net = get_network(self._model_name, **net_kwargs) + net = get_network(self._model_name, **self._kwargs) net.cast(self._dtype) if self._hybridize: net.hybridize(static_alloc=True, static_shape=True) diff --git a/autogluon/task/image_classification/utils.py b/autogluon/task/image_classification/utils.py --- a/autogluon/task/image_classification/utils.py +++ b/autogluon/task/image_classification/utils.py @@ -30,7 +30,7 @@ def get_data_loader(dataset, input_size, batch_size, num_workers, final_fit, spl imagenet_samples = 1281167 num_batches = imagenet_samples // batch_size else: - num_workers = 0 + num_workers = 0 #? train_data = DataLoader( train_dataset, batch_size=batch_size, shuffle=True, last_batch="discard", num_workers=num_workers) diff --git a/autogluon/utils/file_helper.py b/autogluon/utils/file_helper.py --- a/autogluon/utils/file_helper.py +++ b/autogluon/utils/file_helper.py @@ -1,13 +1,15 @@ import os, csv import pandas as pd +import numpy as np +from glob import glob -__all__ = ['generate_csv', 'generate_csv_submission'] +__all__ = ['generate_csv', 'generate_csv_submission', 'generate_prob_csv'] -def generate_csv_submission(dataset, data_path, local_path, inds, preds, class_name, custom): +def generate_csv_submission(dataset_path, dataset, local_path, inds, preds, class_name, custom): """ Generate_csv for submission with different formats. :param dataset: dataset name. - :param data_path: dataset path. + :param dataset_path: dataset path. :param local_path: save log and plot performance_vs_trials figure. :param inds: the category id. :param preds: the category probability. 
@@ -75,8 +77,8 @@ def generate_csv_submission(dataset, data_path, local_path, inds, preds, class_n 'value': 'probability_1', 'special': 0} - test_path = os.path.join(data_path, 'test') - csv_path = os.path.join(data_path, 'sample_submission.csv') + test_path = os.path.join(dataset_path, 'test') + csv_path = os.path.join(dataset_path, 'sample_submission.csv') ids = sorted(os.listdir(test_path)) save_csv_name = custom + '.csv' save_csv_path = os.path.join(local_path, dataset, save_csv_name) @@ -138,6 +140,43 @@ def get_name(name): df.to_csv(save_csv_path, index=False) print('generate_csv B is done') +def filter_value(prob, Threshold): + if prob > Threshold: + prob = prob + else: + prob = 0 + return prob + +def generate_prob_csv(test_dataset, preds, set_prob_thresh=0, ensemble_list='', custom='./submission.csv', scale_min_max=True): + if isinstance(test_dataset.rand, list): + ids = sorted([x for x, _ in test_dataset.rand[0].items]) + csv_path = test_dataset.rand[0]._root.replace('test', 'sample_submission.csv') + else: + ids = sorted([x for x, _ in test_dataset.rand.items]) + csv_path = test_dataset.rand._root.replace('test', 'sample_submission.csv') + df = pd.read_csv(csv_path) + imagename_list = [name_id[:-4] for name_id in ids] + row_index_group = [] + for i in imagename_list: + row_index = df[df['id'] == str(i)].index.tolist() + if not len(row_index) == 0: + row_index_group.append(row_index[0]) + df.loc[row_index_group, 1:] = preds + df.to_csv(custom, index=False) + + def ensemble_csv(glob_files): + file_list = [] + for i, glob_file in enumerate(glob(glob_files)): + file_list.append(pd.read_csv(glob_file, index_col=0)) + w = sum([*file_list])/len(file_list) + if scale_min_max: + w = w.apply(lambda x: np.round((x - min(x)) / (1.0 * (max(x) - min(x))), 2), axis=1) + for i in w.columns.values: + w[i] = w[i].apply(filter_value, Threshold=set_prob_thresh) + w.to_csv(custom) + if not ensemble_list.strip() == '': + ensemble_csv(ensemble_list) + print('dog_generate_csv is done') def generate_csv(inds, path): with open(path, 'w') as csvFile: @@ -150,4 +189,4 @@ def generate_csv(inds, path): writer = csv.writer(csvFile) writer.writerow(row) id += 1 - csvFile.close() \ No newline at end of file + csvFile.close() diff --git a/examples/image_classification/benchmark.py b/examples/image_classification/benchmark.py --- a/examples/image_classification/benchmark.py +++ b/examples/image_classification/benchmark.py @@ -28,9 +28,8 @@ def parse_args(): opt = parser.parse_args() return opt -def predict_details(data_path, classifier, load_dataset): - test_dataset = os.path.join(data_path, 'test') - inds, probs, probs_all= classifier.predict(task.Dataset(test_dataset)) +def predict_details(test_dataset, classifier, load_dataset): + inds, probs, probs_all= classifier.predict(test_dataset) value = [] target_dataset = load_dataset.init() for i in inds: @@ -41,8 +40,9 @@ def main(): opt = parse_args() if not os.path.exists(opt.dataset): os.mkdir(opt.dataset) + dataset_path = os.path.join(opt.data_dir, opt.dataset) + local_path = os.path.dirname(__file__) - data_path = os.path.join(local_path, opt.data_dir, opt.dataset) output_directory = os.path.join(opt.dataset ,'checkpoint/') filehandler = logging.FileHandler(os.path.join(opt.dataset ,'summary.log')) streamhandler = logging.StreamHandler() @@ -52,7 +52,7 @@ def main(): logger.addHandler(streamhandler) logging.info(opt) - target = config_choice(opt.dataset, opt.data_dir) + target = config_choice(opt.data_dir, opt.dataset) load_dataset = 
task.Dataset(target['dataset']) classifier = task.fit(dataset = load_dataset, output_directory = output_directory, @@ -73,8 +73,10 @@ def main(): logger.info(summary) if opt.submission: - inds, probs, probs_all, value = predict_details(data_path, classifier, load_dataset) - ag.utils.generate_csv_submission(opt.dataset, data_path, local_path, inds, probs_all, value, opt.custom) + test_dataset = task.Dataset(os.path.join(opt.data_dir, opt.dataset, 'test'), train=False) + inds, probs, probs_all, value = predict_details(test_dataset, classifier, load_dataset) + ag.utils.generate_csv_submission(dataset_path, opt.dataset, local_path, inds, probs_all, value, opt.custom) if __name__ == '__main__': main() + diff --git a/examples/image_classification/blog.py b/examples/image_classification/blog.py new file mode 100644 --- /dev/null +++ b/examples/image_classification/blog.py @@ -0,0 +1,39 @@ +import os +import autogluon as ag +from autogluon import ImageClassification as task +from mxnet import optimizer as optim + +def task_dog_breed_identification(data_path, dataset): + images_path = os.path.join(data_path, dataset, 'images_all') + label_path = os.path.join(data_path, dataset, 'labels.csv') + test_path = os.path.join(data_path, dataset, 'test') + load_dataset = task.Dataset(images_path, label_file=label_path) + + @ag.obj( + learning_rate=ag.space.Real(0.3, 0.5), + momentum=ag.space.Real(0.90, 0.95), + wd=ag.space.Real(1e-6, 1e-4, log=True), + multi_precision=False + ) + class NAG(optim.NAG): + pass + + classifier = task.fit(dataset=load_dataset, + net=ag.Categorical('standford_dog_resnext101_64x4d', 'standford_dog_resnet152_v1'), + optimizer=NAG(), + epochs=20, + final_fit_epochs=180, + num_trials=40, + ngpus_per_trial=8, + batch_size=48, + verbose=False, + ensemble=1) + + test_dataset = task.Dataset(test_path, train=False, crop_ratio=0.65) + inds, probs, probs_all = classifier.predict(test_dataset, set_prob_thresh=0.001) + ag.utils.generate_prob_csv(test_dataset, probs_all, custom='./submission.csv') + +if __name__ == '__main__': + data_path = '/home/ubuntu/workspace/dataset' + dataset = 'dog-breed-identification' + task_dog_breed_identification(data_path, dataset) \ No newline at end of file diff --git a/examples/image_classification/kaggle_configuration.py b/examples/image_classification/kaggle_configuration.py --- a/examples/image_classification/kaggle_configuration.py +++ b/examples/image_classification/kaggle_configuration.py @@ -11,23 +11,22 @@ def download_shopee(dataset, data_path): else: print(dataset + '.zip already exists.\n') -def config_choice(dataset, data_path): +def config_choice(data_path, dataset): global kaggle_choice dataset_path = os.path.join(data_path, dataset, 'images') if dataset == 'dogs-vs-cats-redux-kernels-edition': - net_cat = ag.space.Categorical('resnet34_v1b', 'resnet34_v1', 'resnet34_v2') + net_cat = ag.space.Categorical('resnet34_v1b') #resnet34_v1 @ag.obj( - learning_rate=ag.space.Real(1e-4, 1e-2, log=True), + learning_rate=ag.space.Real(0.3, 0.5), momentum=ag.space.Real(0.86, 0.99), - wd=ag.space.Real(1e-6, 1e-3, log=True), - multi_precision=False + wd=ag.space.Real(1e-5, 1e-3, log=True) ) class NAG(optim.NAG): pass optimizer = NAG() lr_config = ag.space.Dict( - lr_mode='cosine', + lr_mode='step', lr_decay=0.1, lr_decay_period=0, lr_decay_epoch='40,80', @@ -49,28 +48,27 @@ class NAG(optim.NAG): use_gn=False) kaggle_choice = {'classes': 2, 'net': net_cat, 'optimizer': optimizer, 'dataset': dataset_path, - 'batch_size': 384,#512 + 'batch_size': 320,#512 
'epochs': 180, - 'ngpus_per_trial': 8, + 'ngpus_per_trial': 4, 'lr_config': lr_config, 'tricks': tricks, - 'num_trials': 30} + 'num_trials': 16} elif dataset == 'aerial-cactus-identification': net_aeri = ag.space.Categorical('resnet34_v1b') @ag.obj( - learning_rate=ag.space.Real(1e-4, 1e-2, log=True), + learning_rate=ag.space.Real(0.3, 0.5), momentum=ag.space.Real(0.88, 0.95), - wd=ag.space.Real(1e-6, 1e-4, log=True), - multi_precision=False + wd=ag.space.Real(1e-5, 1e-3, log=True) ) class NAG(optim.NAG): pass optimizer = NAG() lr_config = ag.space.Dict( - lr_mode='cosine', + lr_mode='step', lr_decay=0.1, lr_decay_period=0, - lr_decay_epoch='40,80', + lr_decay_epoch='60,120', warmup_lr=0.0, warmup_epochs=5) tricks = ag.space.Dict( @@ -89,21 +87,18 @@ class NAG(optim.NAG): use_gn=False) kaggle_choice = {'classes': 2, 'net': net_aeri, 'optimizer': optimizer, 'dataset': dataset_path, - 'batch_size': 256,#384 + 'batch_size': 320,#256 'epochs': 180, - 'ngpus_per_trial': 8, + 'ngpus_per_trial': 4, 'lr_config': lr_config, 'tricks': tricks, 'num_trials': 30} elif dataset == 'plant-seedlings-classification': - net_plant = ag.space.Categorical('resnet50_v1', 'resnet50_v1b', 'resnet50_v1c', - 'resnet50_v1d', 'resnet50_v1s') - + net_plant = ag.space.Categorical('resnet50_v1') @ag.obj( - learning_rate=ag.space.Real(1e-4, 1e-3, log=True), - momentum=ag.space.Real(0.93, 0.95), - wd=ag.space.Real(1e-6, 1e-4, log=True), - multi_precision=False + learning_rate=ag.space.Real(0.3, 0.5), + momentum=ag.space.Real(0.85, 0.95), + wd=ag.space.Real(1e-6, 1e-4, log=True) ) class NAG(optim.NAG): pass @@ -131,24 +126,22 @@ class NAG(optim.NAG): use_gn=False) kaggle_choice = {'classes': 12, 'net': net_plant, 'optimizer': optimizer, 'dataset': dataset_path, - 'batch_size': 16, - 'epochs': 96, - 'ngpus_per_trial': 8, + 'batch_size': 128, + 'epochs': 120, + 'ngpus_per_trial': 2, 'lr_config': lr_config, 'tricks': tricks, 'num_trials': 30} elif dataset == 'fisheries_Monitoring': net_fish = ag.space.Categorical('resnet50_v1') @ag.obj( - learning_rate=ag.space.Real(1e-3, 1e-2, log=True), - momentum=ag.space.Real(0.85, 0.90), - wd=ag.space.Real(1e-6, 1e-4, log=True), - multi_precision=False + learning_rate=ag.space.Real(0.3, 0.5), + momentum=ag.space.Real(0.85, 0.95), + wd=ag.space.Real(1e-6, 1e-4, log=True) ) class NAG(optim.NAG): pass optimizer = NAG() - lr_config = ag.space.Dict( lr_mode='cosine', lr_decay=0.1, @@ -172,22 +165,18 @@ class NAG(optim.NAG): use_gn=False) kaggle_choice = {'classes': 8, 'net': net_fish, 'optimizer': optimizer, 'dataset': dataset_path, - 'batch_size': 96, + 'batch_size': 128, 'epochs': 120, - 'ngpus_per_trial': 8, + 'ngpus_per_trial': 2, 'lr_config': lr_config, 'tricks': tricks, 'num_trials': 30} elif dataset == 'dog-breed-identification': - net_dog = ag.space.Categorical('resnet101_v1', 'resnet101_v2', 'resnext101_64x4d', 'resnet101_v1b_gn', - 'resnet101_v1b', 'resnet101_v1c', 'resnet101_v1d', 'resnet101_v1e', - 'resnet101_v1s', 'resnext101b_64x4d') - + net_dog = ag.space.Categorical('resnext101_64x4d') @ag.obj( - learning_rate=ag.space.Real(1e-4, 1e-3, log=True), - momentum=ag.space.Real(0.90, 0.95), - wd=ag.space.Real(1e-6, 1e-4, log=True), - multi_precision=True # True fix + learning_rate=ag.space.Real(0.3, 0.5), + momentum=ag.space.Real(0.85, 0.95), + wd=ag.space.Real(1e-6, 1e-4, log=True) ) class NAG(optim.NAG): pass @@ -196,7 +185,7 @@ class NAG(optim.NAG): lr_mode='cosine', lr_decay=0.1, lr_decay_period=0, - lr_decay_epoch='40,80', + lr_decay_epoch='60,120', warmup_lr=0.0, 
warmup_epochs=5) tricks = ag.space.Dict( @@ -217,17 +206,16 @@ class NAG(optim.NAG): 'dataset': dataset_path, 'batch_size': 48, 'epochs': 180, - 'ngpus_per_trial': 8, + 'ngpus_per_trial': 4, 'lr_config': lr_config, 'tricks': tricks, 'num_trials': 30} elif dataset == 'shopee-iet-machine-learning-competition': - net_shopee = ag.space.Categorical('resnet152_v1','resnet152_v2', 'resnet152_v1b', 'resnet152_v1d','resnet152_v1s') + net_shopee = ag.space.Categorical('resnet152_v1d') @ag.obj( - learning_rate=ag.space.Real(1e-4, 1e-2, log=True), - momentum=ag.space.Real(0.90, 1.0), - wd=ag.space.Real(1e-4, 1e-2, log=True), - multi_precision=False + learning_rate=ag.space.Real(1e-2, 1e-1, log=True), + momentum=ag.space.Real(0.85, 0.95), + wd=ag.space.Real(1e-6, 1e-4, log=True) ) class NAG(optim.NAG): pass @@ -236,7 +224,7 @@ class NAG(optim.NAG): lr_mode='cosine', lr_decay=0.1, lr_decay_period=0, - lr_decay_epoch='40,80', + lr_decay_epoch='60,120', warmup_lr=0.0, warmup_epochs=5) @@ -259,7 +247,7 @@ class NAG(optim.NAG): 'dataset': dataset_path, 'batch_size': 48, 'epochs': 180, - 'ngpus_per_trial': 8, + 'ngpus_per_trial': 4, 'lr_config': lr_config, 'tricks': tricks, 'num_trials': 30}
diff --git a/tests/unittests/test_classification_tricks.py b/tests/unittests/test_classification_tricks.py --- a/tests/unittests/test_classification_tricks.py +++ b/tests/unittests/test_classification_tricks.py @@ -89,14 +89,15 @@ def test_tricks(test_trials): num_trials = target['num_trials'], batch_size = target['batch_size'], verbose = True, - search_strategy='skopt', + search_strategy='random', tricks = target['tricks'], lr_config = target['lr_config'], plot_results = True) - # test_dataset = task.Dataset(target['dataset'].replace('images', 'test')) - # inds, probs, probals_all= classifier.predict(test_dataset) - # print(inds[0], probs[0], probals_all[0]) + test_dataset = task.Dataset(target['dataset'].replace('train', 'test/BabyPants'), train=False, + scale_ratio_choice=[0.7, 0.8, 0.875]) + inds, probs, probs_all = classifier.predict(test_dataset, set_prob_thresh=0.001) + print(inds[0],probs[0],probs_all[0]) print('Top-1 val acc: %.3f' % classifier.results['best_reward']) # summary = classifier.fit_summary(output_directory=dataset, verbosity=3) @@ -105,3 +106,6 @@ def test_tricks(test_trials): if __name__ == '__main__': test_tricks(2) + + + diff --git a/tests/unittests/test_image_classification.py b/tests/unittests/test_image_classification.py --- a/tests/unittests/test_image_classification.py +++ b/tests/unittests/test_image_classification.py @@ -24,3 +24,4 @@ def test_classifier_save_load(): if __name__ == '__main__': test_ensemble() + test_classifier_save_load()
Different classifications, predicted classification result is the same, and probability is 1. Following the example on the official website, I used the trained model to predict two images from different classes; the predictions do not match expectations: the predicted class is wrong and the reported probability is 1. ![image](https://user-images.githubusercontent.com/20124347/72679214-0d688a00-3ae8-11ea-9534-1f176b639446.png) ![image](https://user-images.githubusercontent.com/20124347/72679236-486abd80-3ae8-11ea-92dd-e4ddd8dd316e.png)
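The patch above addresses this by adding test-time multi-scale evaluation and a probability threshold to `predict`. A minimal usage sketch, mirroring the updated unit test in the diff (the dataset paths are placeholders):

```python
# Build test datasets at several crop ratios via scale_ratio_choice; predict()
# averages the per-scale probabilities and zeroes values below set_prob_thresh.
from autogluon import ImageClassification as task

classifier = task.fit(dataset=task.Dataset('~/data/train'), epochs=2)

test_dataset = task.Dataset('~/data/test', train=False,
                            scale_ratio_choice=[0.7, 0.8, 0.875])
inds, probs, probs_all = classifier.predict(test_dataset, set_prob_thresh=0.001)
print(inds[0], probs[0], probs_all[0])
```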
2020-01-19T15:00:05Z
[]
[]
autogluon/autogluon
559
autogluon__autogluon-559
[ "558" ]
81d2b1e9963ae84237387431f9a5ee89e9a8f1b0
diff --git a/autogluon/contrib/enas/enas.py b/autogluon/contrib/enas/enas.py --- a/autogluon/contrib/enas/enas.py +++ b/autogluon/contrib/enas/enas.py @@ -70,7 +70,7 @@ class ENAS_Net(Cls): def __init__(self, *args, **kwargs): kwvars.update(kwargs) super().__init__(*args, **kwvars) - # + # self._modules = {} self._kwspaces = collections.OrderedDict() for k, module in kwvars.items(): @@ -242,7 +242,7 @@ def graph(self): e.edge(pre_node, i) pre_node = i return e - + @property def kwspaces(self): return self._kwspaces @@ -303,6 +303,51 @@ def __repr__(self): reprstr += ')\n' return reprstr + def export(self, path): + """Export HybridBlock to json format that can be loaded by + `gluon.SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface. + + .. note:: When there are only one input, it will have name `data`. When there + Are more than one inputs, they will be named as `data0`, `data1`, etc. + + Parameters + ---------- + path : str + Path to save model. Two files `path-symbol.json` and `path.params` + will be created. + + Examples + -------- + + >>> mynet.export('enas') + >>> mynet_static = mx.gluon.nn.SymbolBlock.imports( + "enas-symbol.json", ['data'], "enas.params") + >>> x = mx.nd.random.uniform(shape=(1, 1, 28, 28)) + >>> y = mynet_static(x) + """ + from mxnet import npx as _mx_npx + from mxnet.util import is_np_array + from mxnet import ndarray + if not self._cached_graph: + raise RuntimeError( + "Please first call block.hybridize() and then run forward with " + "this block at least once before calling export.") + sym = self._cached_graph[1] + sym.save('%s-symbol.json'%path, remove_amp_cast=True) + + arg_names = set(sym.list_arguments()) + aux_names = set(sym.list_auxiliary_states()) + arg_dict = {} + for name, param in self.collect_params().items(): + if name in arg_names: + arg_dict['arg:%s'%name] = param._reduce() + elif name in aux_names: + arg_dict['aux:%s'%name] = param._reduce() + else: + pass + save_fn = _mx_npx.save if is_np_array() else ndarray.save + save_fn('%s.params'%(path), arg_dict) + class Zero_Unit(gluon.HybridBlock): def hybrid_forward(self, F, x): return x
diff --git a/tests/unittests/test_contrib_enas.py b/tests/unittests/test_contrib_enas.py new file mode 100644 --- /dev/null +++ b/tests/unittests/test_contrib_enas.py @@ -0,0 +1,65 @@ +import os + +import pytest +import numpy as np + +import autogluon as ag +import mxnet as mx +import mxnet.gluon.nn as nn +from autogluon.contrib.enas import * + +class Identity(mx.gluon.HybridBlock): + def hybrid_forward(self, F, x): + return x + +class ConvBNReLU(mx.gluon.HybridBlock): + def __init__(self, in_channels, channels, kernel, stride): + super().__init__() + padding = (kernel - 1) // 2 + self.conv = nn.Conv2D(channels, kernel, stride, padding, in_channels=in_channels) + self.bn = nn.BatchNorm(in_channels=channels) + self.relu = nn.Activation('relu') + def hybrid_forward(self, F, x): + return self.relu(self.bn(self.conv(x))) + +@enas_unit() +class ResUnit(mx.gluon.HybridBlock): + def __init__(self, in_channels, channels, hidden_channels, kernel, stride): + super().__init__() + self.conv1 = ConvBNReLU(in_channels, hidden_channels, kernel, stride) + self.conv2 = ConvBNReLU(hidden_channels, channels, kernel, 1) + if in_channels == channels and stride == 1: + self.shortcut = Identity() + else: + self.shortcut = nn.Conv2D(channels, 1, stride, in_channels=in_channels) + def hybrid_forward(self, F, x): + return self.conv2(self.conv1(x)) + self.shortcut(x) + + +def test_enas_net(): + mynet = ENAS_Sequential( + ResUnit(1, 8, hidden_channels=ag.space.Categorical(4, 8), kernel=ag.space.Categorical(3, 5), stride=2), + ResUnit(8, 8, hidden_channels=8, kernel=ag.space.Categorical(3, 5), stride=2), + ResUnit(8, 16, hidden_channels=8, kernel=ag.space.Categorical(3, 5), stride=2), + ResUnit(16, 16, hidden_channels=8, kernel=ag.space.Categorical(3, 5), stride=1, with_zero=True), + ResUnit(16, 16, hidden_channels=8, kernel=ag.space.Categorical(3, 5), stride=1, with_zero=True), + nn.GlobalAvgPool2D(), + nn.Flatten(), + nn.Activation('relu'), + nn.Dense(10, in_units=16), + ) + + mynet.initialize() + mynet.hybridize() + + x = mx.nd.random.uniform(shape=(1, 1, 28, 28)) + xx = mynet.evaluate_latency(x) + y = mynet(x) + assert mynet.nparams == 8714 + mynet.export('enas') + mynet_static = mx.gluon.nn.SymbolBlock.imports("enas-symbol.json", ['data'], "enas.params") + yy = mynet_static(x) + np.testing.assert_almost_equal(y.asnumpy(), yy.asnumpy()) + +if __name__ == "__main__": + test_enas_net()
How to save the model created with an ENAS_Scheduler? We have this nice tutorial showing how to use the ENAS_Scheduler to train a model. [ENAS Tutorial](https://autogluon.mxnet.io/tutorials/nas/enas_proxylessnas.html) My question is: How do we save and load trained models created that way? I've tried 5 different ways and none of them seem to work. Based on what I read, the one I think I'm supposed to use is this: ` mynet.export(modelFile, epoch=numEpochsTrained) ` However, that produces this error message when I run it: ``` Traceback (most recent call last): File "/usr/lib/python3.6/pdb.py", line 1667, in main pdb._runscript(mainpyfile) File "/usr/lib/python3.6/pdb.py", line 1548, in _runscript self.run(statement) File "/usr/lib/python3.6/bdb.py", line 434, in run exec(cmd, globals, locals) File "<string>", line 1, in <module> File "/app/src/cli/boundryDetector.py", line 2, in <module> import sys,os; File "/app/src/code/boundryDetector/../lib/click/src/click/core.py", line 857, in __call__ return self.main(*args, **kwargs) File "/app/src/code/boundryDetector/../lib/click/src/click/core.py", line 810, in main rv = self.invoke(ctx) File "/app/src/code/boundryDetector/../lib/click/src/click/core.py", line 1099, in invoke return ctx.invoke(self.callback, **ctx.params) File "/app/src/code/boundryDetector/../lib/click/src/click/core.py", line 613, in invoke return callback(*args, **kwargs) File "/app/src/cli/boundryDetector.py", line 108, in boundry_detector dnn.save(modelFile) File "/app/src/code/boundryDetector/./dnn/save.py", line 12, in save s.model.export(modelFile, epoch=s.numEpochsTrained) File "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/block.py", line 1106, in export assert name in aux_names AssertionError ``` I'm using: ``` autogluon==0.0.11 mxnet==1.6.0 ``` Your kind assistance would be greatly appreciated. :)
@zhreshold Could you take a look at this?
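The patch above resolves this by adding a custom `export` method to `ENAS_Sequential` that saves only the parameters of the currently sampled architecture. The resulting save/load round trip, taken from the new docstring and unit test (`mynet` is assumed to be an `ENAS_Sequential` built as in the test):

```python
# Hybridize and run one forward pass first: export() raises a RuntimeError
# unless the cached symbolic graph exists.
import mxnet as mx

mynet.initialize()
mynet.hybridize()
x = mx.nd.random.uniform(shape=(1, 1, 28, 28))
y = mynet(x)

mynet.export('enas')  # writes enas-symbol.json and enas.params
mynet_static = mx.gluon.nn.SymbolBlock.imports(
    "enas-symbol.json", ['data'], "enas.params")
yy = mynet_static(x)  # numerically matches y
```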
2020-07-17T21:55:29Z
[]
[]
autogluon/autogluon
1113
autogluon__autogluon-1113
[ "1109", "1109" ]
516668adfe33c5adaed69c14976339480081eb79
diff --git a/vision/src/autogluon/vision/predictor/predictor.py b/vision/src/autogluon/vision/predictor/predictor.py --- a/vision/src/autogluon/vision/predictor/predictor.py +++ b/vision/src/autogluon/vision/predictor/predictor.py @@ -363,6 +363,12 @@ def fit(self, config['early_stop_baseline'] = -np.Inf if 'early_stop_max_value' not in config or config['early_stop_max_value'] == None: config['early_stop_max_value'] = np.Inf + # batch size cannot be larger than dataset size + bs = min(config.get('batch_size', 16), len(train_data)) + config['batch_size'] = bs + if ngpus_per_trial is not None and ngpus_per_trial > 1 and bs < ngpus_per_trial: + # batch size must be larger than # gpus + config['ngpus_per_trial'] = bs # verbosity if log_level > logging.INFO: logging.getLogger('gluoncv.auto.tasks.image_classification').propagate = False @@ -420,9 +426,13 @@ def _validate_data(self, data): elif isinstance(data, _ImageClassification.Dataset): assert 'label' in data.columns assert hasattr(data, 'classes') + orig_classes = data.classes + if not isinstance(data.classes, (tuple, list)): + # consider it as an invalid dataset without proper label, try to reconstruct as a normal DataFrame + orig_classes = [] # check whether classes are outdated, no action required if all unique labels is subset of `classes` unique_labels = sorted(data['label'].unique().tolist()) - if not (all(ulabel in data.classes for ulabel in unique_labels)): + if not (all(ulabel in orig_classes for ulabel in unique_labels)): data = _ImageClassification.Dataset(data, classes=unique_labels) logger.log(20, f'Reset labels to {unique_labels}') if len(data) < 1:
diff --git a/vision/tests/unittests/test_image_classification.py b/vision/tests/unittests/test_image_classification.py --- a/vision/tests/unittests/test_image_classification.py +++ b/vision/tests/unittests/test_image_classification.py @@ -1,4 +1,6 @@ from autogluon.vision import ImagePredictor as Task +import autogluon.core as ag +import os import pandas as pd import numpy as np import copy @@ -47,3 +49,11 @@ def test_task_label_remap(): score_accuracy = accuracy(y_true=test_dataset['label'], y_pred=pred) score_log_loss = log_loss(y_true=test_dataset['label'].replace(label_remap_inverse), y_pred=pred_proba.to_numpy()) assert score_accuracy > 0.2 # relax + +def test_invalid_image_dataset(): + ImagePredictor = Task + invalid_test = ag.download('https://autogluon.s3-us-west-2.amazonaws.com/miscs/test_autogluon_invalid_dataset.zip') + invalid_test = ag.unzip(invalid_test) + df = ImagePredictor.Dataset.from_csv(os.path.join(invalid_test, 'train.csv'), root=os.path.join(invalid_test, 'train_images')) + predictor = ImagePredictor(label="labels") + predictor.fit(df, df.copy(), time_limit=60)
TypeError: argument of type 'NoneType' is not iterable My data has two columns, `[image,labels]` and I load images as follow df = ImagePredictor.Dataset.from_csv('train.csv', root='train_images') when I do `df.show_images()` it plot the images. 
But when I do

```
predictor = ImagePredictor()
predictor.fit(df, hyperparameters={'epochs': 2})
```

it gives the following error:

```
---------------------------------------------------------------------------
AssertionError                            Traceback (most recent call last)
<ipython-input-39-e0442c33015a> in <module>
      1 predictor = ImagePredictor()
----> 2 predictor.fit(df, hyperparameters={'epochs': 2})  # you can trust the default config, we reduce the # epoch to save some build time

/opt/conda/lib/python3.7/site-packages/autogluon/vision/configs/presets_configs.py in _call(*args, **kwargs)
     13     def _call(*args, **kwargs):
     14         gargs, gkwargs = set_presets(preset_name, *args, **kwargs)
---> 15         return f(*gargs, **gkwargs)
     16     return _call
     17     return _unpack_inner

/opt/conda/lib/python3.7/site-packages/autogluon/vision/predictor/predictor.py in fit(self, train_data, tuning_data, time_limit, presets, hyperparameters, **kwargs)
    293
    294         # data sanity check
--> 295         train_data = self._validate_data(train_data)
    296         train_labels = _get_valid_labels(train_data)
    297         self._label_cleaner = LabelCleaner.construct(problem_type=self._problem_type, y=train_labels, y_uncleaned=train_labels)

/opt/conda/lib/python3.7/site-packages/autogluon/vision/predictor/predictor.py in _validate_data(self, data)
    414             raise TypeError(f"Unable to process dataset of type: {type(data)}")
    415         elif isinstance(data, _ImageClassification.Dataset):
--> 416             assert 'label' in data.columns
    417             assert hasattr(data, 'classes')
    418             # check whether classes are outdated, no action required if all unique labels is subset of `classes`

AssertionError:
```

which means the column name should be `label` instead of `labels`. So when I changed the column name to `label` as follows:

```
df = ImagePredictor.Dataset.from_csv('train.csv', root='train_images')
df.columns = ['image', 'label']
```

and ran `df.show_images()`, I got the following error:

```
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-41-741ee22ad2e8> in <module>
----> 1 df.show_images()

/opt/conda/lib/python3.7/site-packages/gluoncv/auto/data/dataset.py in show_images(self, indices, nsample, ncol, shuffle, resize, fontsize)
    132         if 'label' in self.columns:
    133             titles = [self.classes[int(self.at[idx, 'label'])] + ': ' + str(self.at[idx, 'label']) \
--> 134                 for idx in indices if idx < len(self)]
    135         _show_images(images, cols=ncol, titles=titles, fontsize=fontsize)
    136

/opt/conda/lib/python3.7/site-packages/gluoncv/auto/data/dataset.py in <listcomp>(.0)
    132         if 'label' in self.columns:
    133             titles = [self.classes[int(self.at[idx, 'label'])] + ': ' + str(self.at[idx, 'label']) \
--> 134                 for idx in indices if idx < len(self)]
    135         _show_images(images, cols=ncol, titles=titles, fontsize=fontsize)
    136

ValueError: invalid literal for int() with base 10: 'frog_eye_leaf_spot'
```

and

```
predictor = ImagePredictor()
predictor.fit(df, hyperparameters={'epochs': 2})
```

gives the following error:

```
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-42-e0442c33015a> in <module>
      1 predictor = ImagePredictor()
----> 2 predictor.fit(df, hyperparameters={'epochs': 2})  # you can trust the default config, we reduce the # epoch to save some build time

/opt/conda/lib/python3.7/site-packages/autogluon/vision/configs/presets_configs.py in _call(*args, **kwargs)
     13     def _call(*args, **kwargs):
     14         gargs, gkwargs = set_presets(preset_name, *args, **kwargs)
---> 15         return f(*gargs, **gkwargs)
     16     return _call
     17     return _unpack_inner

/opt/conda/lib/python3.7/site-packages/autogluon/vision/predictor/predictor.py in fit(self, train_data, tuning_data, time_limit, presets, hyperparameters, **kwargs)
    293
    294         # data sanity check
--> 295         train_data = self._validate_data(train_data)
    296         train_labels = _get_valid_labels(train_data)
    297         self._label_cleaner = LabelCleaner.construct(problem_type=self._problem_type, y=train_labels, y_uncleaned=train_labels)

/opt/conda/lib/python3.7/site-packages/autogluon/vision/predictor/predictor.py in _validate_data(self, data)
    418             # check whether classes are outdated, no action required if all unique labels is subset of `classes`
    419             unique_labels = sorted(data['label'].unique().tolist())
--> 420             if not (all(ulabel in data.classes for ulabel in unique_labels)):
    421                 data = _ImageClassification.Dataset(data, classes=unique_labels)
    422                 logger.log(20, f'Reset labels to {unique_labels}')

/opt/conda/lib/python3.7/site-packages/autogluon/vision/predictor/predictor.py in <genexpr>(.0)
    418             # check whether classes are outdated, no action required if all unique labels is subset of `classes`
    419             unique_labels = sorted(data['label'].unique().tolist())
--> 420             if not (all(ulabel in data.classes for ulabel in unique_labels)):
    421                 data = _ImageClassification.Dataset(data, classes=unique_labels)
    422                 logger.log(20, f'Reset labels to {unique_labels}')

TypeError: argument of type 'NoneType' is not iterable
```

I tried using a label encoder, but the problem is still there, though the error changes.
A workaround is to change the label column of the CSV file first, then save it and load it again; the labels also need to be encoded. I also tried to specify the column name, but it did not work: `predictor = ImagePredictor(label="labels")`.

Here is a [colab notebook](https://colab.research.google.com/drive/1UJw6Hvl7B-B9gdH5yMKt0jmY8bJt_wHa?usp=sharing).
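A minimal sketch of that workaround, assuming a Kaggle-style `train.csv` with `image` and `labels` columns as in the report above; the `train_fixed.csv` filename and the use of scikit-learn's `LabelEncoder` are illustrative assumptions, not part of the original report:

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from autogluon.vision import ImagePredictor

# Rename the label column to the expected name and integer-encode the
# string classes before handing the CSV back to the Dataset helper.
df = pd.read_csv('train.csv')
df = df.rename(columns={'labels': 'label'})
df['label'] = LabelEncoder().fit_transform(df['label'])
df.to_csv('train_fixed.csv', index=False)

# Reloading through from_csv lets the dataset rebuild its `classes` metadata.
train = ImagePredictor.Dataset.from_csv('train_fixed.csv', root='train_images')
predictor = ImagePredictor()
predictor.fit(train, hyperparameters={'epochs': 2})
```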
2021-05-14T00:56:47Z
[]
[]
autogluon/autogluon
1183
autogluon__autogluon-1183
[ "1179" ]
6d197d0856d1e7fbb019009c47932f40bedf3343
diff --git a/core/src/autogluon/core/_setup_utils.py b/core/src/autogluon/core/_setup_utils.py --- a/core/src/autogluon/core/_setup_utils.py +++ b/core/src/autogluon/core/_setup_utils.py @@ -18,7 +18,7 @@ 'pandas': '>=1.0.0,<2.0', 'scikit-learn': '>=0.23.2,<0.25', # 0.22 crashes during efficient OOB in Tabular 'scipy': '>=1.5.4,<1.7', - 'gluoncv': '>=0.10.2.post0,<0.10.4', + 'gluoncv': '>=0.10.3,<0.10.4', 'tqdm': '>=4.38.0', 'Pillow': '<=8.1', 'graphviz': '<0.9.0,>=0.8.1', diff --git a/vision/src/autogluon/vision/predictor/predictor.py b/vision/src/autogluon/vision/predictor/predictor.py --- a/vision/src/autogluon/vision/predictor/predictor.py +++ b/vision/src/autogluon/vision/predictor/predictor.py @@ -16,7 +16,7 @@ from autogluon.core.utils import verbosity2loglevel, get_gpu_count from autogluon.core.utils.utils import generate_train_test_split from ..configs.presets_configs import unpack, _check_gpu_memory_presets -from ..utils import MXNetErrorCatcher +from ..utils import MXNetErrorCatcher, sanitize_batch_size __all__ = ['ImagePredictor'] @@ -380,11 +380,12 @@ def fit(self, if 'early_stop_max_value' not in config or config['early_stop_max_value'] == None: config['early_stop_max_value'] = np.Inf # batch size cannot be larger than dataset size - bs = min(config.get('batch_size', 16), len(train_data)) + if ngpus_per_trial is not None and ngpus_per_trial > 1: + min_value = ngpus_per_trial + else: + min_value = 1 + bs = sanitize_batch_size(config.get('batch_size', 16), min_value=min_value, max_value=len(train_data)) config['batch_size'] = bs - if ngpus_per_trial is not None and ngpus_per_trial > 1 and bs < ngpus_per_trial: - # batch size must be larger than # gpus - config['ngpus_per_trial'] = bs # verbosity if log_level > logging.INFO: logging.getLogger('gluoncv.auto.tasks.image_classification').propagate = False diff --git a/vision/src/autogluon/vision/utils/__init__.py b/vision/src/autogluon/vision/utils/__init__.py --- a/vision/src/autogluon/vision/utils/__init__.py +++ b/vision/src/autogluon/vision/utils/__init__.py @@ -1,3 +1,4 @@ from .learning_rate import * from .plot_network import plot_network from .error_handler import MXNetErrorCatcher +from .space_sanitizer import sanitize_batch_size diff --git a/vision/src/autogluon/vision/utils/space_sanitizer.py b/vision/src/autogluon/vision/utils/space_sanitizer.py new file mode 100644 --- /dev/null +++ b/vision/src/autogluon/vision/utils/space_sanitizer.py @@ -0,0 +1,36 @@ +"""ag.Space sanitizer for certain hyperparameters""" +import warnings +import numpy as np +from autogluon.core import Categorical, Int + + +def sanitize_batch_size(batch_size, min_value=1, max_value=np.inf): + if isinstance(batch_size, Categorical): + valid_bs = [] + bs_values = batch_size.data + for bs_value in bs_values: + if isinstance(bs_value, int) and min_value < bs_value < max_value: + valid_bs.append(bs_value) + if valid_bs != bs_values: + warnings.warn(f'Pruning batch size from {batch_size} to {valid_bs} due to memory limit.') + if len(valid_bs) == 1: + new_bs = valid_bs[0] + else: + new_bs = Categorical(*valid_bs) + elif isinstance(batch_size, Int): + lower = batch_size.lower + upper = batch_size.upper + if not isinstance(lower, int) or not isinstance(upper, int): + raise TypeError(f'Invalid lower {lower} or upper {upper} bound for Int space') + lower = max(lower, min_value) + upper = min(upper, max_value) + new_bs = Int(lower=lower, upper=upper) + if lower != batch_size.lower or upper != batch_size.higher: + warnings.warn(f'Adjusting batch size range from 
{batch_size} to {new_bs} due to memory limit.') + elif isinstance(batch_size, int): + new_bs = max(min(batch_size, max_value), min_value) + if new_bs != batch_size: + warnings.warn(f'Adjusting batch size from {batch_size} to {new_bs} due to memory limit.') + else: + raise TypeError(f'Expecting batch size to be (Categorical/Int/int), given {type(batch_size)}.') + return new_bs
diff --git a/vision/tests/unittests/test_image_classification.py b/vision/tests/unittests/test_image_classification.py --- a/vision/tests/unittests/test_image_classification.py +++ b/vision/tests/unittests/test_image_classification.py @@ -64,3 +64,11 @@ def test_invalid_image_dataset(): df = ImagePredictor.Dataset.from_csv(os.path.join(invalid_test, 'train.csv'), root=os.path.join(invalid_test, 'train_images')) predictor = ImagePredictor(label="labels") predictor.fit(df, df.copy(), time_limit=60) + + +def test_image_predictor_presets(): + ImagePredictor = Task + train_dataset, _, test_dataset = ImagePredictor.Dataset.from_folders('https://autogluon.s3.amazonaws.com/datasets/shopee-iet.zip') + for preset in ['medium_quality_faster_train', 'medium_quality_faster_inference']: + predictor = ImagePredictor() + predictor.fit(train_dataset,tuning_data=test_dataset, presets=[preset], time_limit=60, hyperparameters={'epochs':1})
TypeError: '<' not supported between instances of 'int' and 'Categorical'

I think after PR #1164 I should be able to use the 'medium_quality_faster_inference' preset, because previously I was getting error #1141. But now I am getting a new error.

Version: 0.2.1b20210612

Colab notebook: https://colab.research.google.com/drive/1SshDbvdCSzvRpxDc5qRsvxM03NfKEUqp?usp=sharing
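A minimal sketch of a reproduction without the notebook, modeled on the regression test in the test patch above (the shopee-iet dataset URL comes from that test):

```python
from autogluon.vision import ImagePredictor

train, _, test = ImagePredictor.Dataset.from_folders(
    'https://autogluon.s3.amazonaws.com/datasets/shopee-iet.zip')

# Any preset whose search space puts a Categorical over batch size
# crashed with the TypeError above; medium_quality_faster_train did not.
predictor = ImagePredictor()
predictor.fit(train, tuning_data=test,
              presets=['medium_quality_faster_inference'],
              time_limit=60, hyperparameters={'epochs': 1})
```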
Thanks yet again for highlighting the issue, this is very valuable! @zhreshold

The error occurs because there is code that tries to ensure the batch size is valid via:

```
bs = min(config.get('batch_size', 16), len(train_data))
```

However, this runs while `config['batch_size']` can still be represented as a search space, i.e. `Categorical[64, 128]`. Therefore, whenever a search space is specified for `batch_size`, the predictor will crash. This means every preset option crashes except for `medium_quality_faster_train`, which does not search over batch size.

This bug was introduced in #1113 and was not caught by our unit tests. @zhreshold, could you add unit tests covering at least the two `medium_quality` settings with simplified hyperparameters (such as `'epochs': 2`)?

We will also need to move the batch size check internally or make it smarter, so that it checks each option in the search space and corrects/removes the invalid ones.
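A sketch of both the failure and the kind of space-aware check described above, using `autogluon.core.Categorical` (its `.data` attribute holds the candidate values, as used in the patch); this illustrates the idea rather than reproducing the final `sanitize_batch_size` implementation:

```python
import autogluon.core as ag

batch_size = ag.Categorical(64, 128)  # a search space, not a plain int

# The buggy check effectively did this, which raises:
# TypeError: '<' not supported between instances of 'int' and 'Categorical'
# bs = min(batch_size, len_train_data)

def clamp_batch_size(batch_size, max_value):
    """Check each option in the search space and drop the invalid ones."""
    if isinstance(batch_size, ag.Categorical):
        valid = [bs for bs in batch_size.data if isinstance(bs, int) and bs <= max_value]
        return valid[0] if len(valid) == 1 else ag.Categorical(*valid)
    return min(batch_size, max_value)

print(clamp_batch_size(batch_size, max_value=100))  # only 64 survives -> 64
```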
2021-06-14T23:42:03Z
[]
[]
autogluon/autogluon
1901
autogluon__autogluon-1901
[ "1892" ]
ea486a477cc39461704c7d71736dc258ec9029ce
diff --git a/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py b/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py --- a/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +++ b/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py @@ -127,6 +127,7 @@ def get_params(self) -> dict: prediction_length=self.prediction_length, quantile_levels=self.quantile_levels, metadata=self.metadata, + target=self.target, ) ) return params @@ -356,10 +357,10 @@ def _hyperparameter_tune( ) return skip_hpo(self, train_data, val_data, time_limit=time_limit) else: - logger.debug(f"Hyperparameter search space for {self.name}: ") + logger.debug(f"\tHyperparameter search space for {self.name}: ") for hyperparameter in search_space: if isinstance(search_space[hyperparameter], ag.Space): - logger.debug(f"{hyperparameter}: {search_space[hyperparameter]}") + logger.debug(f"\t{hyperparameter}: {search_space[hyperparameter]}") dataset_train_filename = "dataset_train.pkl" train_path = self.path + dataset_train_filename
diff --git a/timeseries/tests/unittests/test_predictor.py b/timeseries/tests/unittests/test_predictor.py --- a/timeseries/tests/unittests/test_predictor.py +++ b/timeseries/tests/unittests/test_predictor.py @@ -5,6 +5,7 @@ import pytest from gluonts.model.seq2seq import MQRNNEstimator +import autogluon.core as ag from autogluon.timeseries.dataset import TimeSeriesDataFrame from autogluon.timeseries.models import DeepARModel from autogluon.timeseries.models.gluonts.models import GenericGluonTSModelFactory @@ -13,9 +14,8 @@ from .common import DUMMY_TS_DATAFRAME TEST_HYPERPARAMETER_SETTINGS = [ - "toy", {"SimpleFeedForward": {"epochs": 1}}, - {"DeepAR": {"epochs": 1}, "SimpleFeedForward": {"epochs": 1}}, + {"AutoETS": {}, "SimpleFeedForward": {"epochs": 1}}, ] @@ -184,6 +184,57 @@ def test_given_hyperparameters_when_predictor_called_and_loaded_back_then_all_mo assert not np.any(np.isnan(predictions)) [email protected]("target_column", ["target", "custom"]) [email protected]( + "hyperparameters", + [ + {"AutoETS": {}, "SimpleFeedForward": {"epochs": 1}}, + {"AutoETS": {}, "SimpleFeedForward": {"epochs": ag.Int(1, 3)}}, + ], +) +def test_given_hp_spaces_and_custom_target_when_predictor_called_predictor_can_predict( + temp_model_path, hyperparameters, target_column +): + df = DUMMY_TS_DATAFRAME.rename(columns={"target": target_column}) + + fit_kwargs = dict( + train_data=df, + hyperparameters=hyperparameters, + tuning_data=df, + ) + init_kwargs = dict(path=temp_model_path, prediction_length=2) + if target_column != "target": + init_kwargs.update({"target": target_column}) + + for hps in hyperparameters.values(): + if any(isinstance(v, ag.Space) for v in hps.values()): + fit_kwargs.update( + { + "hyperparameter_tune_kwargs": { + "scheduler": "local", + "searcher": "random", + "num_trials": 2, + }, + } + ) + break + + predictor = TimeSeriesPredictor(**init_kwargs) + predictor.fit(**fit_kwargs) + + assert predictor.get_model_names() + + for model_name in predictor.get_model_names(): + predictions = predictor.predict(df, model=model_name) + + assert isinstance(predictions, TimeSeriesDataFrame) + + predicted_item_index = predictions.index.levels[0] + assert all(predicted_item_index == df.index.levels[0]) # noqa + assert all(len(predictions.loc[i]) == 2 for i in predicted_item_index) + assert not np.any(np.isnan(predictions)) + + @pytest.mark.parametrize("hyperparameters", TEST_HYPERPARAMETER_SETTINGS) def test_given_hyperparameters_when_predictor_called_and_loaded_back_then_loaded_learner_can_predict( temp_model_path, hyperparameters
[BUG] Presets in TimeSeriesPredictor are not working well.

- [x] I have checked that this bug exists on the latest stable version of AutoGluon
- [x] and/or I have checked that this bug exists on the latest mainline of AutoGluon via source installation

**Describe the bug**
I realized that only 2 of the preset options work correctly. I tested the new release of TimeSeriesPredictor with the different presets, i.e. ("best_quality", "high_quality", "good_quality", "medium_quality", "low_quality", and "low_quality_hpo"). Only "medium_quality" and "low_quality" work. I noticed the other presets force hyperparameter tuning of the models; then, when tuning starts, it breaks.

**To Reproduce**
1)
```
predictor = TimeSeriesPredictor(
    target="value",
    prediction_length=prediction_length,
    eval_metric="MAPE",
    verbosity=4)
predictor.fit(train_data=test_data, presets='best_quality')
```
2)
```
predictor = TimeSeriesPredictor(
    target="value",
    prediction_length=prediction_length,
    eval_metric="MAPE",
    verbosity=4)
predictor.fit(train_data=test_data, presets='medium_quality')
```

**Screenshots**
1) ![image](https://user-images.githubusercontent.com/32521301/175453958-26c759cf-4584-48e1-9541-752a49d2ab0e.png)
2) ![image](https://user-images.githubusercontent.com/32521301/175454034-d7151c62-4859-43be-a90a-d4e5124485d3.png)

**Installed Versions**
Which version of AutoGluon are you using? 0.5.0
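As context for the fix in the patch above (which threads `target=self.target` into the model params so HPO trials see the custom target), the added regression test exercises a custom target together with a search space. A condensed sketch of that scenario; the synthetic panel construction is an assumption for illustration:

```python
import pandas as pd
import autogluon.core as ag
from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# Tiny synthetic panel with a non-default target column name.
index = pd.MultiIndex.from_product(
    [["A", "B"], pd.date_range("2022-01-01", periods=20, freq="D")],
    names=["item_id", "timestamp"])
df = TimeSeriesDataFrame(pd.DataFrame({"custom": range(40)}, index=index))

predictor = TimeSeriesPredictor(target="custom", prediction_length=2)
predictor.fit(
    train_data=df,
    tuning_data=df,
    hyperparameters={"SimpleFeedForward": {"epochs": ag.Int(1, 3)}},
    # A search space forces the HPO path that previously dropped the target.
    hyperparameter_tune_kwargs={"scheduler": "local", "searcher": "random", "num_trials": 2},
)
```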
2022-06-26T14:38:38Z
[]
[]
autogluon/autogluon
2,705
autogluon__autogluon-2705
[ "2688" ]
7e2b38d3759ae0115387e4dcf619b1585b1eca3d
diff --git a/features/src/autogluon/features/generators/abstract.py b/features/src/autogluon/features/generators/abstract.py --- a/features/src/autogluon/features/generators/abstract.py +++ b/features/src/autogluon/features/generators/abstract.py @@ -168,7 +168,8 @@ def __init__( self._post_generators.append(DropDuplicatesFeatureGenerator(post_drop_duplicates=False)) if name_prefix or name_suffix: from .rename import RenameFeatureGenerator - self._post_generators.append(RenameFeatureGenerator(name_prefix=name_prefix, name_suffix=name_suffix, inplace=True)) + # inplace=False required to avoid altering outer context: refer to https://github.com/autogluon/autogluon/issues/2688 + self._post_generators.append(RenameFeatureGenerator(name_prefix=name_prefix, name_suffix=name_suffix, inplace=False)) if self._post_generators: if not self.get_tags().get('allow_post_generators', True): @@ -333,7 +334,8 @@ def transform(self, X: DataFrame) -> DataFrame: if col not in X.columns: missing_cols.append(col) raise KeyError(f'{len(missing_cols)} required columns are missing from the provided dataset to transform using {self.__class__.__name__}. ' - f'Missing columns: {missing_cols}') + f'{len(missing_cols)} missing columns: {missing_cols} | ' + f'{len(list(X.columns))} available columns: {list(X.columns)}') if self._pre_astype_generator: X = self._pre_astype_generator.transform(X) X_out = self._transform(X)
diff --git a/features/tests/features/generators/test_auto_ml_pipeline.py b/features/tests/features/generators/test_auto_ml_pipeline.py --- a/features/tests/features/generators/test_auto_ml_pipeline.py +++ b/features/tests/features/generators/test_auto_ml_pipeline.py @@ -111,3 +111,139 @@ def test_auto_ml_pipeline_feature_generator(generator_helper, data_helper): # text_ngram checks assert expected_output_data_feat_total == list(output_data['__nlp__._total_'].values) + + +def test_auto_ml_pipeline_feature_generator_raw_text(generator_helper, data_helper): + # Given + input_data = data_helper.generate_multi_feature_full() + + toy_vectorizer = CountVectorizer(min_df=2, ngram_range=(1, 3), max_features=10, dtype=np.uint8) + + generator = AutoMLPipelineFeatureGenerator(enable_raw_text_features=True, vectorizer=toy_vectorizer) + + for generator_stage in generator.generators: + for generator_inner in generator_stage: + if isinstance(generator_inner, TextNgramFeatureGenerator): + # Necessary in test to avoid CI non-deterministically pruning ngram counts. + generator_inner.max_memory_ratio = None + + expected_feature_metadata_in_full = { + ('category', ()): ['cat'], + ('datetime', ()): ['datetime'], + ('float', ()): ['float'], + ('int', ()): ['int'], + ('object', ()): ['obj'], + ('object', ('datetime_as_object',)): ['datetime_as_object'], + ('object', ('text',)): ['text'] + } + + expected_feature_metadata_full = { + ('category', ()): ['obj', 'cat'], + ('float', ()): ['float'], + ('int', ()): ['int'], + ('int', ('binned', 'text_special')): [ + 'text.char_count', + 'text.word_count', + 'text.lower_ratio', + 'text.special_ratio', + 'text.symbol_ratio. ' + ], + ('int', ('datetime_as_int',)): [ + 'datetime', + 'datetime.year', + 'datetime.month', + 'datetime.day', + 'datetime.dayofweek', + 'datetime_as_object', + 'datetime_as_object.year', + 'datetime_as_object.month', + 'datetime_as_object.day', + 'datetime_as_object.dayofweek' + ], + ('int', ('text_ngram',)): [ + '__nlp__.breaks', + '__nlp__.end', + '__nlp__.end of', + '__nlp__.end of the', + '__nlp__.of', + '__nlp__.sentence', + '__nlp__.sentence breaks', + '__nlp__.the', + '__nlp__.the end', + '__nlp__.world', + '__nlp__._total_' + ], + ('object', ('text',)): [ + 'text_raw_text' + ], + } + + # When + output_data = generator_helper.fit_transform_assert( + input_data=input_data, + generator=generator, + expected_feature_metadata_in_full=expected_feature_metadata_in_full, + expected_feature_metadata_full=expected_feature_metadata_full, + ) + + assert list(input_data['text'].values) == list(output_data['text_raw_text'].values) + + +def test_auto_ml_pipeline_feature_generator_only_raw_text(generator_helper, data_helper): + """ + Specifically tests when only text columns are provided. + This verifies the edge-case bug in v0.6.2 from https://github.com/autogluon/autogluon/issues/2688 is not present. + """ + + # Given + input_data = data_helper.generate_text_feature().to_frame('text') + + toy_vectorizer = CountVectorizer(min_df=2, ngram_range=(1, 3), max_features=10, dtype=np.uint8) + + generator = AutoMLPipelineFeatureGenerator(enable_raw_text_features=True, vectorizer=toy_vectorizer) + + for generator_stage in generator.generators: + for generator_inner in generator_stage: + if isinstance(generator_inner, TextNgramFeatureGenerator): + # Necessary in test to avoid CI non-deterministically pruning ngram counts. 
+ generator_inner.max_memory_ratio = None + + expected_feature_metadata_in_full = { + ('object', ('text',)): ['text'] + } + + expected_feature_metadata_full = { + ('int', ('binned', 'text_special')): [ + 'text.char_count', + 'text.word_count', + 'text.lower_ratio', + 'text.special_ratio', + 'text.symbol_ratio. ' + ], + ('int', ('text_ngram',)): [ + '__nlp__.breaks', + '__nlp__.end', + '__nlp__.end of', + '__nlp__.end of the', + '__nlp__.of', + '__nlp__.sentence', + '__nlp__.sentence breaks', + '__nlp__.the', + '__nlp__.the end', + '__nlp__.world', + '__nlp__._total_' + ], + ('object', ('text',)): [ + 'text_raw_text' + ], + } + + # When + output_data = generator_helper.fit_transform_assert( + input_data=input_data, + generator=generator, + expected_feature_metadata_in_full=expected_feature_metadata_in_full, + expected_feature_metadata_full=expected_feature_metadata_full, + ) + + assert list(input_data['text'].values) == list(output_data['text_raw_text'].values)
[BUG] KeyError: "1 required columns are missing from the provided dataset to transform using CategoryFeatureGenerator. Missing columns: ['Product_Description']"

[Please refer to my colab file to see the bug in detail: https://colab.research.google.com/drive/1IbQeeavwM7wNSghw0B2kWBsE-RHHHUMU?usp=sharing]

======================================================================

Bug description: I am following the multimodal tutorial to train a dataset with only text features (reference page: https://auto.gluon.ai/stable/tutorials/tabular_prediction/tabular-multimodal-text-others.html).

Here are my settings:

from autogluon.tabular import TabularPredictor
predictor = TabularPredictor(label='Sentiment', path='./model', problem_type='multiclass', eval_metric="f1_micro", verbosity=4)
predictor.fit(train_df, hyperparameters='multimodal', time_limit=None, presets=['best_quality'], auto_stack=True, refit_full=True, set_best_to_refit_full=True, keep_only_best=True, save_space=True, num_bag_sets=1, num_gpus=1)

While training goes well, testing seems to hit a bug:

KeyError: "1 required columns are missing from the provided dataset to transform using CategoryFeatureGenerator. Missing columns: ['Product_Description']"

Here, 'Product_Description' is a feature of the testing set, and I can confirm the testing set does include this feature. Can anyone give a hand?
Besides, this bug also seems strange:

Fitting model: TextPredictor_BAG_L1 ...
    Dropped 262 of 263 features.
    Warning: Exception caused TextPredictor_BAG_L1 to fail during training... Skipping this model.
        module 'PIL.Image' has no attribute 'Resampling'

How can PIL.Image influence TextPredictor?
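The patch above fixes the KeyError by constructing `RenameFeatureGenerator` with `inplace=False`, since the in-place rename altered the outer frame that later generators still referenced. A small pandas sketch of that hazard, with a hypothetical `rename_text_columns` helper standing in for the generator:

```python
import pandas as pd

X = pd.DataFrame({'Product_Description': ['great phone', 'bad case'],
                  'Sentiment': [1, 0]})

def rename_text_columns(df, inplace):
    # Mimics a rename step that appends a suffix to raw text columns.
    if inplace:
        df.rename(columns={'Product_Description': 'Product_Description_raw_text'},
                  inplace=True)
        return df
    return df.rename(columns={'Product_Description': 'Product_Description_raw_text'})

out = rename_text_columns(X, inplace=True)
# The caller's frame was mutated too, so a later generator that still expects
# the original column raises the KeyError reported above:
print('Product_Description' in X.columns)  # False -> "required columns are missing"
```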
2023-01-16T21:13:17Z
[]
[]
autogluon/autogluon
2715
autogluon__autogluon-2715
[ "2674" ]
b18eaffb97df0c5a483d59189e00f07b1a722044
diff --git a/core/src/autogluon/core/trainer/abstract_trainer.py b/core/src/autogluon/core/trainer/abstract_trainer.py --- a/core/src/autogluon/core/trainer/abstract_trainer.py +++ b/core/src/autogluon/core/trainer/abstract_trainer.py @@ -95,6 +95,7 @@ def __init__(self, path: str, problem_type: str, eval_metric=None, self._num_rows_train = None self._num_cols_train = None + self._num_rows_val = None self.is_data_saved = False self._X_saved = False @@ -128,6 +129,11 @@ def _path_attr(self) -> str: def path_data(self) -> str: return self.path_utils + 'data' + os.path.sep + @property + def has_val(self) -> bool: + """Whether the trainer uses validation data""" + return self._num_rows_val is not None + def load_X(self): if self._X_saved: path = self.path_data + 'X.pkl' @@ -1968,7 +1974,7 @@ def _train_multi_and_ensemble(self, X, y, X_val, y_val, hyperparameters: dict = self._groups = groups self._num_rows_train = len(X) if X_val is not None: - self._num_rows_train += len(X_val) + self._num_rows_val = len(X_val) self._num_cols_train = len(list(X.columns)) model_names_fit = self.train_multi_levels(X, y, hyperparameters=hyperparameters, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled, level_start=1, level_end=num_stack_levels+1, time_limit=time_limit, **kwargs) @@ -2455,6 +2461,7 @@ def get_info(self, include_model_info=False) -> dict: time_train_start = self._time_train_start num_rows_train = self._num_rows_train num_cols_train = self._num_cols_train + num_rows_val = self._num_rows_val num_classes = self.num_classes # TODO: # Disk size of models @@ -2472,6 +2479,7 @@ def get_info(self, include_model_info=False) -> dict: 'time_train_start': time_train_start, 'num_rows_train': num_rows_train, 'num_cols_train': num_cols_train, + 'num_rows_val': num_rows_val, 'num_classes': num_classes, 'problem_type': problem_type, 'eval_metric': eval_metric, @@ -2939,13 +2947,13 @@ def calibrate_model(self, model_name: str = None, lr: float = 0.01, max_iter: in if m_full == model_name: model_name_og = m break - if self.bagged_mode: - y_val_probs = self.get_model_oof(model_name_og) - y_val = self.load_y().to_numpy() - else: + if self.has_val: X_val = self.load_X_val() y_val_probs = self.predict_proba(X_val, model_name_og) y_val = self.load_y_val().to_numpy() + else: # bagged mode + y_val_probs = self.get_model_oof(model_name_og) + y_val = self.load_y().to_numpy() if self.problem_type == BINARY: # Convert one-dimensional array to be in the form of a 2-class multiclass predict_proba output
diff --git a/tabular/tests/unittests/edgecases/test_edgecases.py b/tabular/tests/unittests/edgecases/test_edgecases.py --- a/tabular/tests/unittests/edgecases/test_edgecases.py +++ b/tabular/tests/unittests/edgecases/test_edgecases.py @@ -93,3 +93,29 @@ def test_num_folds_hpo(fit_helper): assert leaderboard.iloc[0]['num_models'] == 4 assert leaderboard.iloc[1]['num_models'] == 4 shutil.rmtree(predictor.path, ignore_errors=True) + + +def test_use_bag_holdout_calibrate(fit_helper): + """ + Test that use_bag_holdout=True works for calibration + Ensures the bug is fixed in https://github.com/autogluon/autogluon/issues/2674 + """ + init_args = dict( + eval_metric='log_loss' + ) + + fit_args = dict( + hyperparameters={'DUMMY': {}}, + num_bag_folds=2, + use_bag_holdout=True, + calibrate=True, + ) + + dataset_name = 'adult' + fit_helper.fit_and_validate_dataset( + dataset_name=dataset_name, + init_args=init_args, + fit_args=fit_args, + expected_model_count=2, + refit_full=False, + )
[BUG] Exception during fitting in calibrate_model() when using custom metric with needs_proba=True

- [ ] I have checked that this bug exists on the latest stable version of AutoGluon
- [ ] and/or I have checked that this bug exists on the latest mainline of AutoGluon via source installation

**Describe the bug**
At the end of training, an exception is raised during `calibrate_model()` when using a custom metric with `needs_proba=True`.

I'm using version 0.6.0 (I have a different reported issue with 0.6.1).

**Expected behavior**
No exception

**To Reproduce**
```python
import numpy as np

from autogluon.core.metrics import make_scorer
from autogluon.tabular import TabularDataset, TabularPredictor


def custom_score(y_true, y_proba):
    pred_true_np = y_true[y_proba >= 0.6]
    pred_false_np = y_true[y_proba <= 0.4]

    pred_true_correct_count = np.count_nonzero(pred_true_np)
    pred_false_correct_count = pred_false_np.size - np.count_nonzero(pred_false_np)

    pred_count = pred_true_np.size + pred_false_np.size
    pred_correct_count = pred_true_correct_count + pred_false_correct_count

    if pred_count == 0:
        return 0.5

    score = pred_correct_count / pred_count
    return score


metric = make_scorer(
    name="custom_score",
    score_func=custom_score,
    optimum=1,
    greater_is_better=True,
    needs_proba=True,
)

train_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')
test_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/test.csv')

predictor = TabularPredictor(label='class', eval_metric=metric).fit(train_data=train_data, time_limit=60)
```

**Logs**
```python
Beginning AutoGluon training ... Time limit = 60s
AutoGluon Version:  0.6.0
Python Version:     3.9.13
Operating System:   Windows
Platform Machine:   AMD64
Platform Version:   10.0.19045
Train Data Rows:    39073
Train Data Columns: 14
Label Column: class
Preprocessing data ...
AutoGluon infers your prediction problem is: 'binary' (because only two unique label-values observed).
    2 unique label values:  [' <=50K', ' >50K']
    If 'binary' is not the correct problem_type, please manually specify the problem_type parameter during predictor init (You may specify problem_type as one of: ['binary', 'multiclass', 'regression'])
Selected class <--> label mapping:  class 1 =  >50K, class 0 =  <=50K
    Note: For your binary classification, AutoGluon arbitrarily selected which label-value represents positive ( >50K) vs negative ( <=50K) class.
    To explicitly set the positive_class, either rename classes to 1 and 0, or specify positive_class in Predictor init.
Using Feature Generators to preprocess the data ...
Fitting AutoMLPipelineFeatureGenerator...
    Inferring data type of each feature based on column values. Set feature_metadata_in to manually specify special dtypes of the features.
    Stage 1 Generators:
        Fitting AsTypeFeatureGenerator...
            Note: Converting 1 features to boolean dtype as they only contain 2 unique values.
    Stage 2 Generators:
        Fitting FillNaFeatureGenerator...
    Stage 3 Generators:
        Fitting IdentityFeatureGenerator...
        Fitting CategoryFeatureGenerator...
            Fitting CategoryMemoryMinimizeFeatureGenerator...
    Stage 4 Generators:
        Fitting DropUniqueFeatureGenerator...
    Types of features in original data (raw dtype, special dtypes):
        ('int', [])    : 6 | ['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', ...]
        ('object', []) : 8 | ['workclass', 'education', 'marital-status', 'occupation', 'relationship', ...]
    Types of features in processed data (raw dtype, special dtypes):
        ('category', [])  : 7 | ['workclass', 'education', 'marital-status', 'occupation', 'relationship', ...]
        ('int', [])       : 6 | ['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', ...]
        ('int', ['bool']) : 1 | ['sex']
    0.4s = Fit runtime
    14 features in original data used to generate 14 features in processed data.
Data preprocessing and feature engineering runtime = 0.45s ...
AutoGluon will gauge predictive performance using evaluation metric: 'custom_score'
    This metric expects predicted probabilities rather than predicted class labels, so you'll need to use predict_proba() instead of predict()
    To change this, specify the eval_metric parameter of Predictor()
Automatically generating train/validation split with holdout_frac=0.0639828014229775, Train Rows: 36573, Val Rows: 2500
Fitting 13 L1 models ...
Fitting model: KNeighborsUnif ... Training model for up to 59.55s of the 59.55s of remaining time.
    0.7752 = Validation score   (custom_score)
    1.66s = Training runtime
    0.01s = Validation runtime
Fitting model: KNeighborsDist ... Training model for up to 57.87s of the 57.87s of remaining time.
    0.7932 = Validation score   (custom_score)
    0.05s = Training runtime
    0.02s = Validation runtime
Fitting model: LightGBMXT ... Training model for up to 57.79s of the 57.78s of remaining time.
    0.9151 = Validation score   (custom_score)
    3.95s = Training runtime
    0.05s = Validation runtime
Fitting model: LightGBM ... Training model for up to 53.71s of the 53.7s of remaining time.
    0.919 = Validation score   (custom_score)
    1.21s = Training runtime
    0.02s = Validation runtime
Fitting model: RandomForestGini ... Training model for up to 52.46s of the 52.46s of remaining time.
    0.8937 = Validation score   (custom_score)
    3.86s = Training runtime
    0.18s = Validation runtime
Fitting model: RandomForestEntr ... Training model for up to 47.89s of the 47.89s of remaining time.
    0.8966 = Validation score   (custom_score)
    4.65s = Training runtime
    0.18s = Validation runtime
Fitting model: CatBoost ... Training model for up to 42.59s of the 42.58s of remaining time.
    Ran out of time, early stopping on iteration 487.
    0.9161 = Validation score   (custom_score)
    42.64s = Training runtime
    0.02s = Validation runtime
Fitting model: WeightedEnsemble_L2 ... Training model for up to 59.55s of the -0.13s of remaining time.
    0.9202 = Validation score   (custom_score)
    0.34s = Training runtime
    0.0s = Validation runtime
AutoGluon training complete, total runtime = 60.55s ... Best model: "WeightedEnsemble_L2"
Traceback (most recent call last):
  File "test.py", line 32, in <module>
    predictor = TabularPredictor(label='class', eval_metric=metric).fit(train_data=train_data, time_limit=60)
  File ".venv\lib\site-packages\autogluon\core\utils\decorators.py", line 30, in _call
    return f(*gargs, **gkwargs)
  File ".venv\lib\site-packages\autogluon\tabular\predictor\predictor.py", line 868, in fit
    self._post_fit(
  File ".venv\lib\site-packages\autogluon\tabular\predictor\predictor.py", line 921, in _post_fit
    self._trainer.calibrate_model()
  File ".venv\lib\site-packages\autogluon\core\trainer\abstract_trainer.py", line 2953, in calibrate_model
    temp_scalar = tune_temperature_scaling(y_val_probs=y_val_probs, y_val=y_val,
  File ".venv\lib\site-packages\autogluon\core\calibrate\temperature_scaling.py", line 57, in tune_temperature_scaling
    optimizer.step(temperature_scale_step)
  File ".venv\lib\site-packages\torch\optim\lr_scheduler.py", line 65, in wrapper
    return wrapped(*args, **kwargs)
  File ".venv\lib\site-packages\torch\optim\optimizer.py", line 113, in wrapper
    return func(*args, **kwargs)
  File ".venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File ".venv\lib\site-packages\torch\optim\lbfgs.py", line 311, in step
    orig_loss = closure()
  File ".venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File ".venv\lib\site-packages\autogluon\core\calibrate\temperature_scaling.py", line 52, in temperature_scale_step
    loss = nll_criterion(new_logits, y_val_tensor)
  File ".venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
    return forward_call(*input, **kwargs)
  File ".venv\lib\site-packages\torch\nn\modules\loss.py", line 1164, in forward
    return F.cross_entropy(input, target, weight=self.weight,
  File ".venv\lib\site-packages\torch\nn\functional.py", line 3014, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
RuntimeError: expected scalar type Long but found Int
```

**Installed Versions**
autogluon-0.6.0

**Additional context**
In the actual training I'm doing, I get a slightly different exception than the one above:

```python
Traceback (most recent call last):
  File "ag.py", line 79, in train
    predictor.fit(train_data)
  File ".venv\lib\site-packages\autogluon\core\utils\decorators.py", line 30, in _call
    return f(*gargs, **gkwargs)
  File ".venv\lib\site-packages\autogluon\tabular\predictor\predictor.py", line 868, in fit
    self._post_fit(
  File ".venv\lib\site-packages\autogluon\tabular\predictor\predictor.py", line 921, in _post_fit
    self._trainer.calibrate_model()
  File ".venv\lib\site-packages\autogluon\core\trainer\abstract_trainer.py", line 2953, in calibrate_model
    temp_scalar = tune_temperature_scaling(y_val_probs=y_val_probs, y_val=y_val,
  File ".venv\lib\site-packages\autogluon\core\calibrate\temperature_scaling.py", line 57, in tune_temperature_scaling
    optimizer.step(temperature_scale_step)
  File ".venv\lib\site-packages\torch\optim\lr_scheduler.py", line 65, in wrapper
    return wrapped(*args, **kwargs)
  File ".venv\lib\site-packages\torch\optim\optimizer.py", line 113, in wrapper
    return func(*args, **kwargs)
  File ".venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File ".venv\lib\site-packages\torch\optim\lbfgs.py", line 311, in step
    orig_loss = closure()
  File ".venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File ".venv\lib\site-packages\autogluon\core\calibrate\temperature_scaling.py", line 50, in temperature_scale_step
    temp = temperature_param.unsqueeze(1).expand(logits.size(0), logits.size(1))
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
```
Seems to work correctly if I call `make_scorer(... needs_threshold=True)` as in the docs, instead of `make_scorer(... needs_proba=True)`, which I assumed was more correct for my case.

Fixed in 0.6.2

I spoke too soon, it still happens in 0.6.2 if I use `make_scorer(... needs_proba=True)`, but not on the simple repro from above. As a workaround I use `make_scorer(... needs_threshold=True)`. Feel free to close if usage of `needs_proba=True` is not supported; it's unclear to me what the correct way is to create a scorer that needs `proba`.

```python
Traceback (most recent call last):
  File "ag.py", line 79, in train
    _PREDICTOR.fit(train_data, **fit_params, **fit_params_extra)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/autogluon/core/utils/decorators.py", line 30, in _call
    return f(*gargs, **gkwargs)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/autogluon/tabular/predictor/predictor.py", line 867, in fit
    self._post_fit(
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/autogluon/tabular/predictor/predictor.py", line 920, in _post_fit
    self._trainer.calibrate_model()
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/autogluon/core/trainer/abstract_trainer.py", line 2954, in calibrate_model
    temp_scalar = tune_temperature_scaling(y_val_probs=y_val_probs, y_val=y_val,
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/autogluon/core/calibrate/temperature_scaling.py", line 57, in tune_temperature_scaling
    optimizer.step(temperature_scale_step)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/optim/lr_scheduler.py", line 65, in wrapper
    return wrapped(*args, **kwargs)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/optim/optimizer.py", line 113, in wrapper
    return func(*args, **kwargs)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/optim/lbfgs.py", line 311, in step
    orig_loss = closure()
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/autogluon/core/calibrate/temperature_scaling.py", line 52, in temperature_scale_step
    loss = nll_criterion(new_logits, y_val_tensor)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/nn/modules/loss.py", line 1164, in forward
    return F.cross_entropy(input, target, weight=self.weight,
  File "/home/studio-lab-user/.conda/envs/exprun/lib/python3.9/site-packages/torch/nn/functional.py", line 3014, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
ValueError: Expected input batch_size (229) to match target batch_size (1116).
```

@2-5 for the bug present in v0.6.2, could you provide a minimal example of it occurring? It will help me deep dive.

Note: setting `needs_threshold=True` simply disables calibration from occurring, which is why it doesn't error.
It seems to happen when bagging is used with `tuning_data` and `use_bag_holdout`.

```python
ValueError: Expected input batch_size (200) to match target batch_size (1200).
```

200 is the `tuning_data` row count, and 1200 is the `train_data` row count.

I couldn't get it to reproduce with the autogluon sample CSV (autogluon.s3.amazonaws.com/datasets/Inc/train.csv) with the same row counts, so there is some sort of data shape/distribution dependency.

Reproduction (with the below `train.csv`):

[train.csv](https://github.com/autogluon/autogluon/files/10435637/train.csv)

```python
import numpy as np

from autogluon.core.metrics import make_scorer
from autogluon.tabular import TabularDataset, TabularPredictor


def custom_score(y_true, y_proba):
    true_np = y_true[y_proba >= 0.6]

    if true_np.size == 0:
        return 0.5

    return np.count_nonzero(true_np) / true_np.size


metric = make_scorer(
    name="custom_score",
    score_func=custom_score,
    optimum=1,
    greater_is_better=True,
    needs_proba=True,
)

train_data = TabularDataset("train.csv")
tuning_data = train_data[:200]

predictor = TabularPredictor(label="y", eval_metric=metric)
predictor.fit(
    train_data=train_data,
    tuning_data=tuning_data,
    num_bag_folds=8,
    use_bag_holdout=True,
    ag_args_ensemble=dict(
        fold_fitting_strategy="sequential_local",
    ),
    time_limit=60,
)
```

Exception:

```python
Traceback (most recent call last):
  File "test_proba.py", line 28, in <module>
    predictor.fit(
  File ".\site-packages\autogluon\core\utils\decorators.py", line 30, in _call
    return f(*gargs, **gkwargs)
  File ".\site-packages\autogluon\tabular\predictor\predictor.py", line 867, in fit
    self._post_fit(
  File ".\site-packages\autogluon\tabular\predictor\predictor.py", line 920, in _post_fit
    self._trainer.calibrate_model()
  File ".\site-packages\autogluon\core\trainer\abstract_trainer.py", line 2954, in calibrate_model
    temp_scalar = tune_temperature_scaling(y_val_probs=y_val_probs, y_val=y_val,
  File ".\site-packages\autogluon\core\calibrate\temperature_scaling.py", line 57, in tune_temperature_scaling
    optimizer.step(temperature_scale_step)
  File ".\site-packages\torch\optim\lr_scheduler.py", line 65, in wrapper
    return wrapped(*args, **kwargs)
  File ".\site-packages\torch\optim\optimizer.py", line 113, in wrapper
    return func(*args, **kwargs)
  File ".\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File ".\site-packages\torch\optim\lbfgs.py", line 311, in step
    orig_loss = closure()
  File ".\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File ".\site-packages\autogluon\core\calibrate\temperature_scaling.py", line 52, in temperature_scale_step
    loss = nll_criterion(new_logits, y_val_tensor)
  File ".\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
    return forward_call(*input, **kwargs)
  File ".\site-packages\torch\nn\modules\loss.py", line 1164, in forward
    return F.cross_entropy(input, target, weight=self.weight,
  File ".\site-packages\torch\nn\functional.py", line 3014, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
ValueError: Expected input batch_size (200) to match target batch_size (1200).
```

Thanks a ton for the example!
I was able to reproduce it with a minimal example:

```python3
from autogluon.tabular import TabularDataset, TabularPredictor

train_data = TabularDataset("train.csv")
tuning_data = train_data[:200]

predictor = TabularPredictor(label="y", eval_metric='log_loss')
predictor.fit(
    train_data=train_data,
    tuning_data=tuning_data,
    num_bag_folds=2,
    use_bag_holdout=True,
    hyperparameters={'GBM': {}}
)
```

Working on a fix currently.
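The eventual fix (see the patch above) keys calibration on whether a real holdout set exists instead of on `bagged_mode`; a sketch of that corrected selection, mirroring the trainer methods named in the patch:

```python
def pick_calibration_data(trainer, model_name):
    # With use_bag_holdout=True the trainer keeps a genuine validation split,
    # so calibrate against it; only fall back to out-of-fold predictions when
    # no holdout exists (pure bagging), where OOF aligns with the full train y.
    if trainer.has_val:
        X_val = trainer.load_X_val()
        y_val_probs = trainer.predict_proba(X_val, model_name)
        y_val = trainer.load_y_val().to_numpy()
    else:
        y_val_probs = trainer.get_model_oof(model_name)
        y_val = trainer.load_y().to_numpy()
    # The old code paired 200 holdout probabilities with 1200 train labels here.
    assert len(y_val_probs) == len(y_val)
    return y_val_probs, y_val
```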
2023-01-17T20:24:15Z
[]
[]
autogluon/autogluon
3033
autogluon__autogluon-3033
[ "3031" ]
8a391cde79a836e6a5069fa3e9efae8973b18310
diff --git a/core/src/autogluon/core/constants.py b/core/src/autogluon/core/constants.py --- a/core/src/autogluon/core/constants.py +++ b/core/src/autogluon/core/constants.py @@ -19,6 +19,7 @@ AG_ARGS = 'ag_args' # Contains arguments to control model name, model priority, and the valid configurations which it can be used in. AG_ARGS_FIT = 'ag_args_fit' # Contains arguments that impact model training, such as early stopping rounds, #cores, #gpus, max time limit, max memory usage # TODO AG_ARGS_ENSEMBLE = 'ag_args_ensemble' # Contains arguments that impact model ensembling, such as if an ensemble model is allowed to use the original features. # TODO: v0.1 add to documentation +AG_ARG_PREFIX = 'ag.' # Prefix to add to a hyperparameter to indicate it is an aux param for ag_args_fit. OBJECTIVES_TO_NORMALIZE = ['log_loss', 'pac_score', 'soft_log_loss'] # do not like predicted probabilities = 0 diff --git a/core/src/autogluon/core/models/abstract/abstract_model.py b/core/src/autogluon/core/models/abstract/abstract_model.py --- a/core/src/autogluon/core/models/abstract/abstract_model.py +++ b/core/src/autogluon/core/models/abstract/abstract_model.py @@ -8,7 +8,7 @@ from autogluon.core.utils import try_import import sys import time -from typing import Dict, Optional, Union +from typing import Any, Dict, Optional, Union import numpy as np import pandas as pd @@ -24,7 +24,7 @@ from .model_trial import model_trial, skip_hpo from ._tags import _DEFAULT_CLASS_TAGS, _DEFAULT_TAGS from ... import metrics, Space -from ...constants import AG_ARGS_FIT, BINARY, REGRESSION, QUANTILE, REFIT_FULL_SUFFIX, OBJECTIVES_TO_NORMALIZE +from ...constants import AG_ARG_PREFIX, AG_ARGS_FIT, BINARY, REGRESSION, QUANTILE, REFIT_FULL_SUFFIX, OBJECTIVES_TO_NORMALIZE from ...data.label_cleaner import LabelCleaner, LabelCleanerMulticlassToBinary from ...hpo.exceptions import EmptySearchSpace from ...hpo.constants import RAY_BACKEND, CUSTOM_BACKEND @@ -137,21 +137,10 @@ def __init__(self, self.compile_time = None # Time taken to compile the model in seconds self.val_score = None # Score with eval_metric (Validation data) + self._user_params, self._user_params_aux = self._init_user_params(params=hyperparameters) + self.params = {} self.params_aux = {} - - if hyperparameters is not None: - hyperparameters = hyperparameters.copy() - if hyperparameters is not None and AG_ARGS_FIT in hyperparameters: - self._user_params_aux = hyperparameters.pop(AG_ARGS_FIT) # TODO: Delete after initialization? - else: - self._user_params_aux = None - if self._user_params_aux is None: - self._user_params_aux = dict() - self._user_params = hyperparameters # TODO: Delete after initialization? - if self._user_params is None: - self._user_params = dict() - self.params_trained = dict() self._is_initialized = False self._is_fit_metadata_registered = False @@ -159,6 +148,70 @@ def __init__(self, self._compiler = None + @classmethod + def _init_user_params(cls, params: Optional[Dict[str, Any]] = None, ag_args_fit: str = AG_ARGS_FIT, ag_arg_prefix: str = AG_ARG_PREFIX) -> (Dict[str, Any], Dict[str, Any]): + """ + Given the user-specified hyperparameters, split into `params` and `params_aux`. + + Parameters + ---------- + params : Optional[Dict[str, Any]], default = None + The model hyperparameters dictionary + ag_args_fit : str, default = "ag_args_fit" + The params key to look for that contains params_aux. + If the key is present, the value is used for params_aux and popped from params. 
+ If no such key is found, then initialize params_aux as an empty dictionary. + ag_arg_prefix : str, default = "ag." + The key prefix to look for that indicates a parameter is intended for params_aux. + If None, this logic is skipped. + If a key starts with this prefix, it is popped from params and added to params_aux with the prefix removed. + For example: + input: params={'ag.foo': 2, 'abc': 7}, params_aux={'bar': 3}, and ag_arg_prefix='.ag', + output: params={'abc': 7}, params_aux={'bar': 3, 'foo': 2} + In cases where the key is specified multiple times, the value of the key with the prefix will always take priority. + A warning will be logged if a key is present multiple times. + For example, given the most complex scenario: + input: params={'ag.foo': 1, 'foo': 2, 'ag_args_fit': {'ag.foo': 3, 'foo': 4}} + output: params={'foo': 2}, params_aux={'foo': 1} + + Returns + ------- + params, params_aux : (Dict[str, Any], Dict[str, Any]) + params will contain the native model hyperparameters + params_aux will contain special auxiliary hyperparameters + """ + params = copy.deepcopy(params) if params is not None else dict() + assert isinstance(params, dict), f"Invalid dtype of params! Expected dict, but got {type(params)}" + for k in params.keys(): + if not isinstance(k, str): + logger.warning(f'Warning: Specified {cls.__name__} hyperparameter key is not of type str: {k} (type={type(k)}). ' + f'There might be a bug in your configuration.') + + params_aux = params.pop(ag_args_fit, dict()) + if params_aux is None: + params_aux = dict() + assert isinstance(params_aux, dict), f"Invalid dtype of params_aux! Expected dict, but got {type(params_aux)}" + if ag_arg_prefix is not None: + param_aux_keys = list(params_aux.keys()) + for k in param_aux_keys: + if isinstance(k, str) and k.startswith(ag_arg_prefix): + k_no_prefix = k[len(ag_arg_prefix):] + if k_no_prefix in params_aux: + logger.warning(f'Warning: {cls.__name__} hyperparameter "{k}" is present ' + f'in `ag_args_fit` as both "{k}" and "{k_no_prefix}". ' + f'Will use "{k}" and ignore "{k_no_prefix}".') + params_aux[k_no_prefix] = params_aux.pop(k) + param_keys = list(params.keys()) + for k in param_keys: + if isinstance(k, str) and k.startswith(ag_arg_prefix): + k_no_prefix = k[len(ag_arg_prefix):] + if k_no_prefix in params_aux: + logger.warning(f'Warning: {cls.__name__} hyperparameter "{k}" is present ' + f'in both `ag_args_fit` and `hyperparameters`. ' + f'Will use `hyperparameters` value.') + params_aux[k_no_prefix] = params.pop(k) + return params, params_aux + def _init_params(self): """Initializes model hyperparameters""" hyperparameters = self._user_params @@ -694,7 +747,7 @@ def fit(self, **kwargs): kwargs = self.initialize(**kwargs) # FIXME: This might have to go before self._preprocess_fit_args, but then time_limit might be incorrect in **kwargs init to initialize kwargs = self._preprocess_fit_args(**kwargs) if 'time_limit' in kwargs and kwargs['time_limit'] is not None and kwargs['time_limit'] <= 0: - logger.warning(f'\tWarning: Model has no time left to train, skipping model... (Time Left = {round(kwargs["time_limit"], 1)}s)') + logger.warning(f'\tWarning: Model has no time left to train, skipping model... 
(Time Left = {kwargs["time_limit"]:.1f}s)') raise TimeLimitExceeded self._register_fit_metadata(**kwargs) @@ -1435,28 +1488,89 @@ def get_minimum_resources(self, is_gpu_available=False) -> Dict[str, int]: 'num_cpus': 1, } - def _estimate_memory_usage(self, X, **kwargs) -> int: + def _estimate_memory_usage(self, X: pd.DataFrame, **kwargs) -> int: """ - This method simply provides a default implementation. Each model should consider implementing custom memory estimate logic. + Estimates the peak memory usage during model fitting. + This method simply provides a default implementation. Each model should consider implementing custom memory estimation logic. + + Parameters + ---------- + X : pd.DataFrame, + The training data intended to fit the model with. + **kwargs : dict, + The `.fit` kwargs. + Can optionally be used by custom implementations to better estimate memory usage. + To best understand what kwargs are available, enter a debugger and put a breakpoint in this method to manually inspect the keys. + + Returns + ------- + The estimated peak memory usage in bytes during model fit. """ return 4 * get_approximate_df_mem_usage(X).sum() @disable_if_lite_mode() - def _validate_fit_memory_usage(self, **kwargs): + def _validate_fit_memory_usage(self, mem_error_threshold: float = 0.9, mem_warning_threshold: float = 0.75, mem_size_threshold: int = None, **kwargs): + """ + Asserts that enough memory is available to fit the model + + If not enough memory, will raise NotEnoughMemoryError + Memory thresholds depend on the `params_aux` hyperparameter `max_memory_usage_ratio`, which generally defaults to 1. + if `max_memory_usage_ratio=None`, all memory checks are skipped. + + Parameters + ---------- + mem_error_threshold : float, default = 0.9 + A multiplier to max_memory_usage_ratio to get the max_memory_usage_error_ratio + If expected memory usage is >max_memory_usage_error_ratio, raise NotEnoughMemoryError + mem_warning_threshold : float, default = 0.75 + A multiplier to max_memory_usage_ratio to get the max_memory_usage_warning_ratio + If expected memory usage is >max_memory_usage_error_ratio, raise NotEnoughMemoryError + mem_size_threshold : int, default = None + If not None, skips checking available memory if the expected model size is less than `mem_size_threshold` bytes. + This is used to speed-up training by avoiding the check in cases where the machine almost certainly has sufficient memory. + **kwargs : dict, + Fit time kwargs, including X, y, X_val, and y_val. + Can be used to customize estimation of memory usage. + """ max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio'] + if max_memory_usage_ratio is None: + return # Skip memory check + approx_mem_size_req = self.estimate_memory_usage(**kwargs) + if mem_size_threshold is not None and approx_mem_size_req < (mem_size_threshold * min(max_memory_usage_ratio, 1)): + return # Model is smaller than the min threshold to check available mem + available_mem = ResourceManager.get_available_virtual_mem() ratio = approx_mem_size_req / available_mem - if ratio > (0.9 * max_memory_usage_ratio): - logger.warning('\tWarning: Not enough memory to safely train model, roughly requires: %s GB, but only %s GB is available...' 
% (round(approx_mem_size_req / 1e9, 3), round(available_mem / 1e9, 3))) + min_error_memory_ratio = ratio / mem_error_threshold + min_warning_memory_ratio = ratio / mem_warning_threshold + max_memory_usage_error_ratio = mem_error_threshold * max_memory_usage_ratio + max_memory_usage_warning_ratio = mem_warning_threshold * max_memory_usage_ratio + + log_ag_args_fit_example = '`predictor.fit(..., ag_args_fit={"ag.max_memory_usage_ratio": VALUE})`' + log_ag_args_fit_example = f'\n\t\tTo set the same value for all models, do the following when calling predictor.fit: {log_ag_args_fit_example}' + + log_user_guideline = f'Estimated to require {approx_mem_size_req / 1e9:.3f} GB ' \ + f'out of {available_mem / 1e9:.3f} GB available memory ({min_error_memory_ratio*100:.3f}%)... ' \ + f'({max_memory_usage_error_ratio*100:.3f}% of avail memory is the max safe size)' + if min_error_memory_ratio > max_memory_usage_error_ratio: + log_user_guideline += f'\n\tTo force training the model, specify the model hyperparameter "ag.max_memory_usage_ratio" to a larger value ' \ + f'(currently {max_memory_usage_ratio}, set to >={min_error_memory_ratio + 0.05:.2f} to avoid the error)' \ + f'{log_ag_args_fit_example}' + if min_error_memory_ratio >= 1: + log_user_guideline += f'\n\t\tSetting "ag.max_memory_usage_ratio" to values above 1 may result in out-of-memory errors. ' \ + f'You may consider using a machine with more memory as a safer alternative.' + logger.warning(f'\tWarning: Not enough memory to safely train model. {log_user_guideline}') raise NotEnoughMemoryError - elif ratio > (0.6 * max_memory_usage_ratio): - logger.warning('\tWarning: Potentially not enough memory to safely train model, roughly requires: %s GB, but only %s GB is available...' % (round(approx_mem_size_req / 1e9, 3), round(available_mem / 1e9, 3))) + elif min_warning_memory_ratio > max_memory_usage_warning_ratio: + log_user_guideline += f'\n\tTo avoid this warning, specify the model hyperparameter "ag.max_memory_usage_ratio" to a larger value ' \ + f'(currently {max_memory_usage_ratio}, set to >={min_warning_memory_ratio + 0.05:.2f} to avoid the warning)' \ + f'{log_ag_args_fit_example}' + if min_warning_memory_ratio >= 1: + log_user_guideline += f'\n\t\tSetting "ag.max_memory_usage_ratio" to values above 1 may result in out-of-memory errors. ' \ + f'You may consider using a machine with more memory as a safer alternative.' + logger.warning(f'\tWarning: Potentially not enough memory to safely train model. {log_user_guideline}') - # Removes non-essential objects from the model to reduce memory and disk footprint. - # If `remove_fit=True`, enables the removal of variables which are required for fitting the model. If the model is already fully trained, then it is safe to remove these. - # If `remove_info=True`, enables the removal of variables which are used during model.get_info(). The values will be None when calling model.get_info(). - # If `requires_save=True`, enables the removal of variables which are part of the model.pkl object, requiring an overwrite of the model to disk if it was previously persisted. def reduce_memory_size(self, remove_fit=True, remove_info=False, requires_save=True, **kwargs): """ Removes non-essential objects from the model to reduce memory and disk footprint. 
@@ -1570,27 +1684,29 @@ def _get_default_stopping_metric(self): stopping_metric = metrics.get_metric(stopping_metric, self.problem_type, 'stopping_metric') return stopping_metric + # TODO: v1.0 Move params_aux to params, separate logic as in _get_ag_params, keep `ag.` prefix for ag_args_fit params + # This will allow to hyperparameter tune ag_args_fit hyperparameters. + # Also delete `self.params_aux` entirely, make it a method instead. def _get_params(self) -> dict: """Gets all params.""" return self.params.copy() def _get_ag_params(self) -> dict: - """Gets params that are not passed to the inner model, but are used by the wrapper.""" + """ + Gets params that are not passed to the inner model, but are used by the wrapper. + These params should exist in `self.params_aux`. + """ ag_param_names = self._ag_params() if ag_param_names: - return {key: val for key, val in self.params.items() if key in ag_param_names} + return {key: val for key, val in self.params_aux.items() if key in ag_param_names} else: return dict() def _get_model_params(self) -> dict: """Gets params that are passed to the inner model.""" - ag_param_names = self._ag_params() - if ag_param_names: - return {key: val for key, val in self.params.items() if key not in ag_param_names} - else: - return self._get_params() + return self._get_params() - # TODO: Add documentation for valid args for each model. Currently only `ag.early_stop` + # TODO: Add documentation for valid args for each model. Currently only `early_stop` def _ag_params(self) -> set: """ Set of params that are not passed to self.model, but are used by the wrapper. @@ -1602,7 +1718,7 @@ def _ag_params(self) -> set: Possible params: - ag.early_stop : int, str, or tuple + early_stop : int, str, or tuple generic name for early stopping logic. Typically can be an int or a str preset/strategy. Also possible to pass tuple of (class, kwargs) to construct a custom early stopping object. Refer to `autogluon.core.utils.early_stopping` for examples. 
diff --git a/tabular/src/autogluon/tabular/models/catboost/catboost_model.py b/tabular/src/autogluon/tabular/models/catboost/catboost_model.py --- a/tabular/src/autogluon/tabular/models/catboost/catboost_model.py +++ b/tabular/src/autogluon/tabular/models/catboost/catboost_model.py @@ -9,9 +9,8 @@ from autogluon.core.constants import PROBLEM_TYPES_CLASSIFICATION, MULTICLASS, SOFTCLASS from autogluon.core.models import AbstractModel from autogluon.core.models._utils import get_early_stopping_rounds -from autogluon.core.utils.exceptions import NotEnoughMemoryError, TimeLimitExceeded +from autogluon.core.utils.exceptions import TimeLimitExceeded from autogluon.core.utils import try_import_catboost -from autogluon.common.utils.lite import disable_if_lite_mode from .callbacks import EarlyStoppingCallback, MemoryCheckCallback, TimeCheckCallback from .catboost_utils import get_catboost_metric_from_ag_metric @@ -106,7 +105,7 @@ def _fit(self, X_val = self.preprocess(X_val) X_val = Pool(data=X_val, label=y_val, cat_features=cat_features, weight=sample_weight_val) eval_set = X_val - early_stopping_rounds = ag_params.get('ag.early_stop', 'adaptive') + early_stopping_rounds = ag_params.get('early_stop', 'adaptive') if isinstance(early_stopping_rounds, (str, tuple, list)): early_stopping_rounds = self._get_early_stopping_rounds(num_rows_train=num_rows_train, strategy=early_stopping_rounds) @@ -284,21 +283,14 @@ def _get_early_stopping_rounds(self, num_rows_train, strategy='auto'): return get_early_stopping_rounds(num_rows_train=num_rows_train, strategy=strategy) def _ag_params(self) -> set: - return {'ag.early_stop'} - - @disable_if_lite_mode() - def _validate_fit_memory_usage(self, **kwargs): - max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio'] - approx_mem_size_req = self.estimate_memory_usage(**kwargs) - if approx_mem_size_req > 1e9: # > 1 GB - available_mem = ResourceManager.get_available_virtual_mem() - ratio = approx_mem_size_req / available_mem - if ratio > (1 * max_memory_usage_ratio): - logger.warning('\tWarning: Not enough memory to safely train CatBoost model, roughly requires: %s GB, but only %s GB is available...' % (round(approx_mem_size_req / 1e9, 3), round(available_mem / 1e9, 3))) - raise NotEnoughMemoryError - elif ratio > (0.75 * max_memory_usage_ratio): - logger.warning('\tWarning: Potentially not enough memory to safely train CatBoost model, roughly requires: %s GB, but only %s GB is available...' 
% (round(approx_mem_size_req / 1e9, 3), round(available_mem / 1e9, 3))) - + return {'early_stop'} + + def _validate_fit_memory_usage(self, mem_error_threshold: float = 1, mem_warning_threshold: float = 0.75, mem_size_threshold: int = 1e9, **kwargs): + return super()._validate_fit_memory_usage(mem_error_threshold=mem_error_threshold, + mem_warning_threshold=mem_warning_threshold, + mem_size_threshold=mem_size_threshold, + **kwargs) + def get_minimum_resources(self, is_gpu_available=False): minimum_resources = { 'num_cpus': 1, diff --git a/tabular/src/autogluon/tabular/models/knn/knn_model.py b/tabular/src/autogluon/tabular/models/knn/knn_model.py --- a/tabular/src/autogluon/tabular/models/knn/knn_model.py +++ b/tabular/src/autogluon/tabular/models/knn/knn_model.py @@ -106,18 +106,11 @@ def _estimate_memory_usage(self, X, **kwargs): expected_final_model_size_bytes = model_size_bytes * 3.6 # Roughly what can be expected of the final KNN model in memory size return expected_final_model_size_bytes - def _validate_fit_memory_usage(self, **kwargs): - max_memory_safety_proportion = 0.2 - max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio'] - expected_final_model_size_bytes = self.estimate_memory_usage(**kwargs) - if expected_final_model_size_bytes > 10000000: # Only worth checking if expected model size is >10MB - available_mem = ResourceManager.get_available_virtual_mem() - model_memory_ratio = expected_final_model_size_bytes / available_mem - if model_memory_ratio > (0.15 * max_memory_usage_ratio): - logger.warning(f'\tWarning: Model is expected to require {round(model_memory_ratio * 100, 2)}% of available memory... ' - f'({max_memory_safety_proportion*100}% is the max safe size.)') - if model_memory_ratio > (max_memory_safety_proportion * max_memory_usage_ratio): - raise NotEnoughMemoryError # don't train full model to avoid OOM error + def _validate_fit_memory_usage(self, mem_error_threshold: float = 0.2, mem_warning_threshold: float = 0.15, mem_size_threshold: int = 1e7, **kwargs): + return super()._validate_fit_memory_usage(mem_error_threshold=mem_error_threshold, + mem_warning_threshold=mem_warning_threshold, + mem_size_threshold=mem_size_threshold, + **kwargs) # TODO: Won't work for RAPIDS without modification # TODO: Technically isn't OOF, but can be used inplace of OOF. Perhaps rename to something more accurate? 
diff --git a/tabular/src/autogluon/tabular/models/lgb/lgb_model.py b/tabular/src/autogluon/tabular/models/lgb/lgb_model.py --- a/tabular/src/autogluon/tabular/models/lgb/lgb_model.py +++ b/tabular/src/autogluon/tabular/models/lgb/lgb_model.py @@ -128,7 +128,7 @@ def _fit(self, if dataset_val is not None: from .callbacks import early_stopping_custom # TODO: Better solution: Track trend to early stop when score is far worse than best score, or score is trending worse over time - early_stopping_rounds = ag_params.get('ag.early_stop', 'adaptive') + early_stopping_rounds = ag_params.get('early_stop', 'adaptive') if isinstance(early_stopping_rounds, (str, tuple, list)): early_stopping_rounds = self._get_early_stopping_rounds(num_rows_train=num_rows_train, strategy=early_stopping_rounds) if early_stopping_rounds is None: @@ -358,7 +358,7 @@ def _features(self): return self._features_internal_list def _ag_params(self) -> set: - return {'ag.early_stop'} + return {'early_stop'} def _more_tags(self): # `can_refit_full=True` because num_boost_round is communicated at end of `_fit` diff --git a/tabular/src/autogluon/tabular/models/rf/rf_model.py b/tabular/src/autogluon/tabular/models/rf/rf_model.py --- a/tabular/src/autogluon/tabular/models/rf/rf_model.py +++ b/tabular/src/autogluon/tabular/models/rf/rf_model.py @@ -131,13 +131,11 @@ def _estimate_memory_usage(self, X, **kwargs): expected_min_memory_usage = bytes_per_estimator * n_estimators_minimum return expected_min_memory_usage - def _validate_fit_memory_usage(self, **kwargs): - max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio'] - available_mem = ResourceManager.get_available_virtual_mem() - expected_min_memory_usage = self.estimate_memory_usage(**kwargs) / available_mem - if expected_min_memory_usage > (0.5 * max_memory_usage_ratio): # if minimum estimated size is greater than 50% memory - logger.warning(f'\tWarning: Model is expected to require {round(expected_min_memory_usage * 100, 2)}% of available memory (Estimated before training)...') - raise NotEnoughMemoryError + def _validate_fit_memory_usage(self, mem_error_threshold: float = 0.5, mem_warning_threshold: float = 0.4, mem_size_threshold: int = 1e7, **kwargs): + return super()._validate_fit_memory_usage(mem_error_threshold=mem_error_threshold, + mem_warning_threshold=mem_warning_threshold, + mem_size_threshold=mem_size_threshold, + **kwargs) def _expected_mem_usage(self, n_estimators_final, bytes_per_estimator): available_mem = ResourceManager.get_available_virtual_mem() diff --git a/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py b/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py --- a/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py +++ b/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py @@ -9,7 +9,6 @@ from autogluon.core.models import AbstractModel from autogluon.core.models._utils import get_early_stopping_rounds from autogluon.core.utils import try_import_xgboost -from autogluon.core.utils.exceptions import NotEnoughMemoryError from . 
import xgboost_utils from .hyperparameters.parameters import get_param_baseline @@ -110,7 +109,7 @@ def _fit(self, else: X_val = self.preprocess(X_val, is_train=False) eval_set.append((X_val, y_val)) - early_stopping_rounds = ag_params.get('ag.early_stop', 'adaptive') + early_stopping_rounds = ag_params.get('early_stop', 'adaptive') if isinstance(early_stopping_rounds, (str, tuple, list)): early_stopping_rounds = self._get_early_stopping_rounds(num_rows_train=num_rows_train, strategy=early_stopping_rounds) @@ -176,7 +175,7 @@ def _get_num_classes(self, y): return num_classes def _ag_params(self) -> set: - return {'ag.early_stop'} + return {'early_stop'} def _estimate_memory_usage(self, X, **kwargs): num_classes = self.num_classes if self.num_classes else 1 # self.num_classes could be None after initialization if it's a regression problem @@ -184,18 +183,12 @@ def _estimate_memory_usage(self, X, **kwargs): approx_mem_size_req = data_mem_usage * 7 + data_mem_usage / 4 * num_classes # TODO: Extremely crude approximation, can be vastly improved return approx_mem_size_req - def _validate_fit_memory_usage(self, **kwargs): - max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio'] - approx_mem_size_req = self.estimate_memory_usage(**kwargs) - if approx_mem_size_req > 1e9: # > 1 GB - available_mem = ResourceManager.get_available_virtual_mem() - ratio = approx_mem_size_req / available_mem - if ratio > (1 * max_memory_usage_ratio): - logger.warning('\tWarning: Not enough memory to safely train XGBoost model, roughly requires: %s GB, but only %s GB is available...' % (round(approx_mem_size_req / 1e9, 3), round(available_mem / 1e9, 3))) - raise NotEnoughMemoryError - elif ratio > (0.75 * max_memory_usage_ratio): - logger.warning('\tWarning: Potentially not enough memory to safely train XGBoost model, roughly requires: %s GB, but only %s GB is available...' % (round(approx_mem_size_req / 1e9, 3), round(available_mem / 1e9, 3))) - + def _validate_fit_memory_usage(self, mem_error_threshold: float = 1.0, mem_warning_threshold: float = 0.75, mem_size_threshold: int = 1e9, **kwargs): + return super()._validate_fit_memory_usage(mem_error_threshold=mem_error_threshold, + mem_warning_threshold=mem_warning_threshold, + mem_size_threshold=mem_size_threshold, + **kwargs) + def get_minimum_resources(self, is_gpu_available=False): minimum_resources = { 'num_cpus': 1,
diff --git a/core/tests/unittests/models/abstract_model/test_init_user_params.py b/core/tests/unittests/models/abstract_model/test_init_user_params.py new file mode 100644 --- /dev/null +++ b/core/tests/unittests/models/abstract_model/test_init_user_params.py @@ -0,0 +1,153 @@ + +import copy +from typing import Any, Dict, Optional + +from autogluon.core.models import AbstractModel + + +def _assert_init_user_params(params_og: Optional[Dict[str, Any]], expected_params: Dict[str, Any], expected_params_aux: Dict[str, Any], **kwargs): + """ + Assert that `AbstractModel._init_user_params` works as intended + and that `AbstractModel` calls `AbstractModel._init_user_params` in the expected way during init. + """ + expected_params_og = copy.deepcopy(params_og) if params_og is not None else params_og + params, params_aux = AbstractModel._init_user_params(params=params_og, **kwargs) + assert params_og == expected_params_og # Ensure no outer context update + assert params == expected_params + assert params_aux == expected_params_aux + + if kwargs is None or len(kwargs.keys()) == 0: + abstract_model = AbstractModel(name='', path='', hyperparameters=params_og) + assert params_og == expected_params_og + assert abstract_model._user_params == expected_params + assert abstract_model._user_params_aux == expected_params_aux + + +def test_init_user_params_none(): + params_og = None + expected_params = {} + expected_params_aux = {} + _assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux) + + +def test_init_user_params_simple(): + params_og = { + 'foo': 1, + 'bar': 2, + } + expected_params = { + 'foo': 1, + 'bar': 2, + } + expected_params_aux = {} + _assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux) + + +def test_init_user_params_ag_args_fit_none(): + params_og = { + 'foo': 1, + 'bar': 2, + 'ag_args_fit': None, + } + expected_params = { + 'foo': 1, + 'bar': 2, + } + expected_params_aux = {} + _assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux) + + +def test_init_user_params_with_prefix(): + params_og = { + 'foo': 1, + 'bar': 2, + 'ag.foo': 3, + } + expected_params = { + 'foo': 1, + 'bar': 2, + } + expected_params_aux = { + 'foo': 3 + } + _assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux) + + +def test_init_user_params_with_ag_args_fit(): + params_og = { + 'foo': 1, + 'bar': 2, + 'ag_args_fit': {'foo': 3}, + } + expected_params = { + 'foo': 1, + 'bar': 2, + } + expected_params_aux = { + 'foo': 3 + } + _assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux) + + +def test_init_user_params_with_ag_args_fit_and_prefix(): + params_og = { + 'foo': 1, + 'bar': 2, + 'ag_args_fit': {'foo': 3, 'ag.foo': 4, 'ag.bar': 5, 'ag.ag.bar': 7}, + } + expected_params = { + 'foo': 1, + 'bar': 2, + } + expected_params_aux = { + 'foo': 4, + 'bar': 5, + 'ag.bar': 7, + } + _assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux) + + +def test_init_user_params_with_all(): + params_og = { + 'foo': 1, + 'bar': 2, + 'ag.foo': 12, + 'ag_args_fit': {'foo': 3, 'ag.foo': 4, 'ag.bar': 5, 'ag.ag.bar': 7}, + } + expected_params = { + 'foo': 1, + 'bar': 2, + } + expected_params_aux = { + 'foo': 12, + 'bar': 5, + 'ag.bar': 7, + } + 
_assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux) + + +def test_init_user_params_with_all_and_custom(): + params_og = { + 'foo': 1, + 'bar': 2, + 'custom.': 'hello', + 'ag.foo': 12, + 'ag_args_fit': {'foo': 3, 'ag.foo': 4, 'ag.bar': 5, 'ag.ag.bar': 7}, + 'hello': {'custom.5': 22, 'ag.custom.5': 33} + } + kwargs = { + "ag_args_fit": 'hello', + "ag_arg_prefix": 'custom.' + } + expected_params = { + 'foo': 1, + 'bar': 2, + 'ag.foo': 12, + 'ag_args_fit': {'foo': 3, 'ag.foo': 4, 'ag.bar': 5, 'ag.ag.bar': 7}, + } + expected_params_aux = { + '': 'hello', + '5': 22, + 'ag.custom.5': 33, + } + _assert_init_user_params(params_og=params_og, expected_params=expected_params, expected_params_aux=expected_params_aux, **kwargs)
Tabular: Clarify memory requirements when memory is insufficient to fit

In the below example, it is unclear what the user should do:

```
Fitting model: LightGBM ...
	Warning: Not enough memory to safely train model, roughly requires: 9.425 GB, but only 9.642 GB is available...
	Not enough memory to train LightGBM... Skipping this model.
```

For example,

1. The user may want to try to train the model anyway, even if it could result in OOM. The way to do so is not communicated.
2. The user may want to know how much memory they would need available to avoid skipping the model. This is not communicated.
3. The user may want to know why the model requires X GB of memory. This is not communicated.

Note: The code that logs this information is here https://github.com/autogluon/autogluon/blob/0.7.0/core/src/autogluon/core/models/abstract/abstract_model.py#L1445-L1454
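The patch above bakes the remediation into the warning text itself. For reference, a minimal sketch of the workaround that the new log message points users to; the dataset URL and `label="class"` come from the standard AutoGluon tutorial and are illustrative, and the exact ratio to pass should come from the "set to >=X" hint printed by the warning:

```python
from autogluon.tabular import TabularDataset, TabularPredictor

# Tutorial dataset; any tabular dataset works the same way.
train_data = TabularDataset("https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv")

predictor = TabularPredictor(label="class").fit(
    train_data,
    # Allow every model a larger share of available memory than the default.
    # Values above 1 can cause genuine out-of-memory errors.
    ag_args_fit={"ag.max_memory_usage_ratio": 1.25},
)
```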
2023-03-12T21:45:36Z
[]
[]
autogluon/autogluon
3,190
autogluon__autogluon-3190
[ "2687" ]
6d2e3c4b8c64c51e826f6f7b76cd05a0188a846d
diff --git a/common/src/autogluon/common/utils/try_import.py b/common/src/autogluon/common/utils/try_import.py
--- a/common/src/autogluon/common/utils/try_import.py
+++ b/common/src/autogluon/common/utils/try_import.py
@@ -1,5 +1,6 @@
 import logging
 import platform
+import sys
 from types import ModuleType
 
 from ..version import __version__
@@ -30,7 +31,7 @@ def try_import_mxboard():
 
 
 def try_import_ray() -> ModuleType:
-    RAY_MAX_VERSION = "2.7.0"
+    RAY_MAX_VERSION = "2.7.0"  # sync with core/setup.py
     ray_max_version_os_map = dict(
         Darwin=RAY_MAX_VERSION,
         Windows=RAY_MAX_VERSION,
@@ -62,7 +63,12 @@ def try_import_catboost():
     try:
         import catboost
     except ImportError as e:
-        raise ImportError("`import catboost` failed. " f"A quick tip is to install via `pip install autogluon.tabular[catboost]=={__version__}`.")
+        error_msg = "`import catboost` failed. "
+        if sys.version_info >= (3, 11) and sys.platform == "darwin":
+            error_msg += f"Detected your env as {sys.platform}. Please either downgrade your python version to below 3.11 or move to another platform. Then install via ``pip install autogluon.tabular[catboost]=={__version__}``"
+        else:
+            error_msg += f"A quick tip is to install via `pip install autogluon.tabular[catboost]=={__version__}`."
+        raise ImportError(error_msg)
     except ValueError as e:
         raise ImportError(
             "Import catboost failed. Numpy version may be outdated, "
@@ -122,7 +128,9 @@ def try_import_torch():
         import torch
     except ImportError as e:
         raise ImportError(
-            "Unable to import dependency torch\n" "A quick tip is to install via `pip install torch`.\n" "The minimum torch version is currently 1.6."
+            "Unable to import dependency torch\n"
+            "A quick tip is to install via `pip install torch`.\n"
+            "The minimum torch version is currently 2.0."  # sync with core/_setup_utils.py
         )
diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -48,10 +48,9 @@
 extras_require = {
     "ray": [
         "ray[default]>=2.6.3,<2.7",
-        "pydantic>=1.10.4,<2.0",  # https://github.com/ray-project/ray/issues/36990
     ],
     "raytune": [
-        "ray[tune]>=2.6.3,<2.7",
+        "ray[default,tune]>=2.6.3,<2.7",
         # TODO: consider alternatives as hyperopt is not actively maintained.
         "hyperopt>=0.2.7,<0.2.8",  # This is needed for the bayes search to work.
         # 'GPy>=1.10.0,<1.11.0'  # TODO: Enable this once PBT/PB2 are supported by ray lightning
diff --git a/core/src/autogluon/core/_setup_utils.py b/core/src/autogluon/core/_setup_utils.py
--- a/core/src/autogluon/core/_setup_utils.py
+++ b/core/src/autogluon/core/_setup_utils.py
@@ -11,7 +11,7 @@
 
 AUTOGLUON_ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", ".."))
 
-PYTHON_REQUIRES = ">=3.8, <3.11"
+PYTHON_REQUIRES = ">=3.8, <3.12"
 
 # Only put packages here that would otherwise appear multiple times across different module's setup.py files. 
@@ -26,7 +26,7 @@ "networkx": ">=3.0,<4", # Major version cap "tqdm": ">=4.38,<5", # Major version cap "Pillow": ">=9.3,<9.6", # "<{N+2}" upper cap - "torch": ">=2.0,<2.1", # "<{N+1}" upper cap + "torch": ">=2.0,<2.1", # "<{N+1}" upper cap, sync with common/src/autogluon/common/utils/try_import.py "lightning": ">=2.0.0,<2.1", # "<{N+1}" upper cap } if LITE_MODE: @@ -137,6 +137,7 @@ def default_setup_args(*, version, submodule): "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Topic :: Software Development", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Information Analysis", diff --git a/examples/automm/object_detection/detection_eval.py b/examples/automm/object_detection/detection_eval.py --- a/examples/automm/object_detection/detection_eval.py +++ b/examples/automm/object_detection/detection_eval.py @@ -10,9 +10,6 @@ python detection_eval.py \ --test_path ./VOCdevkit/VOC2007/Annotations/test_cocoformat.json \ --checkpoint_name faster_rcnn_r50_fpn_1x_voc0712 - -Note that for now it's required to install nightly build torchmetrics. -This will be solved in next pr. (MeanAveragePrecision will be moved to AG temporarily.) """ import argparse diff --git a/examples/automm/object_detection/detection_train.py b/examples/automm/object_detection/detection_train.py --- a/examples/automm/object_detection/detection_train.py +++ b/examples/automm/object_detection/detection_train.py @@ -23,9 +23,6 @@ --lr <learning_rate> \ --wd <weight_decay> \ --epochs <epochs> - -Note that for now it's required to install nightly build torchmetrics. -This will be solved in next pr. (MeanAveragePrecision will be moved to AG temporarily.) """ import argparse diff --git a/multimodal/setup.py b/multimodal/setup.py --- a/multimodal/setup.py +++ b/multimodal/setup.py @@ -37,9 +37,9 @@ "transformers[sentencepiece]>=4.31.0,<4.32.0", "timm>=0.9.5,<0.10.0", "torchvision>=0.14.0,<0.16.0", # torch 1.13 requires torchvision 0.14. Increase it to 0.15 when dropping the support of torch 1.13. - "scikit-image>=0.19.1,<0.20.0", + "scikit-image>=0.19.1,<0.21.0", "text-unidecode>=1.3,<1.4", - "torchmetrics>=1.0.0,<1.1.0", + "torchmetrics>=1.0.0,<1.2.0", "nptyping>=1.4.4,<2.5.0", "omegaconf>=2.1.1,<2.3.0", f"autogluon.core[raytune]=={version}", @@ -64,7 +64,7 @@ "onnx>=1.13.0,<1.14.0", "onnxruntime>=1.15.0,<1.16.0;platform_system=='Darwin'", "onnxruntime-gpu>=1.15.0,<1.16.0;platform_system!='Darwin'", - "tensorrt>=8.5.3.1,<8.5.4;platform_system=='Linux'", + "tensorrt>=8.5.3.1,<8.5.4;platform_system=='Linux' and python_version<'3.11'", # tensorrt > 8.5.4 cause segfault ] extras_require = { diff --git a/tabular/setup.py b/tabular/setup.py --- a/tabular/setup.py +++ b/tabular/setup.py @@ -38,8 +38,9 @@ "catboost": [ # CatBoost wheel build is not working correctly on darwin for CatBoost 1.2, so use old version in this case. # https://github.com/autogluon/autogluon/pull/3190#issuecomment-1540599280 - 'catboost>=1.1,<1.2 ; sys_platform == "darwin"', - "catboost>=1.1,<1.3", + # Catboost 1.2 doesn't have wheel for python 3.11 + "catboost>=1.1,<1.2 ; sys_platform == 'darwin' and python_version < '3.11'", + "catboost>=1.1,<1.3; sys_platform != 'darwin'", ], # FIXME: Debug why xgboost 1.6 has 4x+ slower inference on multiclass datasets compared to 1.4 # It is possibly only present on MacOS, haven't tested linux. 
@@ -66,7 +67,8 @@ ], "vowpalwabbit": [ # FIXME: 9.5+ causes VW to save an empty model which always predicts 0. Confirmed on MacOS (Intel CPU). Unknown how to fix. - "vowpalwabbit>=9,<9.9", + # No vowpalwabbit wheel for python 3.11 or above yet + "vowpalwabbit>=9,<9.9; python_version < '3.11'", ], "skl2onnx": [ "skl2onnx>=1.15.0,<1.16.0",
diff --git a/multimodal/tests/unittests/others/test_deployment_onnx.py b/multimodal/tests/unittests/others/test_deployment_onnx.py --- a/multimodal/tests/unittests/others/test_deployment_onnx.py +++ b/multimodal/tests/unittests/others/test_deployment_onnx.py @@ -6,6 +6,7 @@ import pytest import torch from datasets import load_dataset +from packaging import version from scipy.stats import pearsonr, spearmanr from sklearn.metrics.pairwise import paired_cosine_distances from torch import FloatTensor @@ -23,6 +24,11 @@ "ae": AEDataset(), } +try: + import tensorrt +except ImportError: + tensorrt = None + def evaluate(predictor, df, onnx_session=None): labels = df["score"].to_numpy() @@ -192,6 +198,10 @@ def test_onnx_export_timm_image(checkpoint_name, num_gpus): ), ], ) [email protected]( + tensorrt is None or version.parse(tensorrt.__version__) >= version.parse("8.5.4"), + reason="tensorrt above 8.5.4 cause segfault, but is required to support py311", +) def test_onnx_optimize_for_inference(dataset_name, model_names, text_backbone, image_backbone): dataset = ALL_DATASETS[dataset_name] hyperparameters = { diff --git a/tabular/tests/unittests/models/test_catboost.py b/tabular/tests/unittests/models/test_catboost.py --- a/tabular/tests/unittests/models/test_catboost.py +++ b/tabular/tests/unittests/models/test_catboost.py @@ -1,6 +1,11 @@ +import sys + +import pytest + from autogluon.tabular.models.catboost.catboost_model import CatBoostModel [email protected](sys.version_info >= (3, 11) and sys.platform == "darwin", reason="catboost has no wheel for py311 darwin") def test_catboost_binary(fit_helper): fit_args = dict( hyperparameters={CatBoostModel: {}}, @@ -9,6 +14,7 @@ def test_catboost_binary(fit_helper): fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args) [email protected](sys.version_info >= (3, 11) and sys.platform == "darwin", reason="catboost has no wheel for py311 darwin") def test_catboost_multiclass(fit_helper): fit_args = dict( hyperparameters={CatBoostModel: {}}, @@ -17,6 +23,7 @@ def test_catboost_multiclass(fit_helper): fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args) [email protected](sys.version_info >= (3, 11) and sys.platform == "darwin", reason="catboost has no wheel for py311 darwin") def test_catboost_regression(fit_helper): fit_args = dict( hyperparameters={CatBoostModel: {}}, @@ -26,6 +33,7 @@ def test_catboost_regression(fit_helper): fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args) [email protected](sys.version_info >= (3, 11) and sys.platform == "darwin", reason="catboost has no wheel for py311 darwin") def test_catboost_quantile(fit_helper): fit_args = dict( hyperparameters={"CAT": {}}, diff --git a/tabular/tests/unittests/models/test_vowpalwabbit.py b/tabular/tests/unittests/models/test_vowpalwabbit.py --- a/tabular/tests/unittests/models/test_vowpalwabbit.py +++ b/tabular/tests/unittests/models/test_vowpalwabbit.py @@ -1,6 +1,11 @@ +import sys + +import pytest + from autogluon.tabular.models.vowpalwabbit.vowpalwabbit_model import VowpalWabbitModel [email protected](sys.version_info >= (3, 11), reason="vowpalwabbit doesn't support python 3.11 and above yet") def test_vowpalwabbit_binary(fit_helper): fit_args = dict( hyperparameters={VowpalWabbitModel: {}}, @@ -9,6 +14,7 @@ def test_vowpalwabbit_binary(fit_helper): fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args) [email protected](sys.version_info >= (3, 11), reason="vowpalwabbit doesn't 
support python 3.11 and above yet") def test_vowpalwabbit_multiclass(fit_helper): fit_args = dict( hyperparameters={VowpalWabbitModel: {}}, @@ -17,6 +23,7 @@ def test_vowpalwabbit_multiclass(fit_helper): fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args) [email protected](sys.version_info >= (3, 11), reason="vowpalwabbit doesn't support python 3.11 and above yet") def test_vowpalwabbit_regression(fit_helper): fit_args = dict( hyperparameters={VowpalWabbitModel: {}}, diff --git a/tabular/tests/unittests/test_tabular.py b/tabular/tests/unittests/test_tabular.py --- a/tabular/tests/unittests/test_tabular.py +++ b/tabular/tests/unittests/test_tabular.py @@ -21,6 +21,7 @@ """ import os import shutil +import sys import tempfile import warnings from random import seed @@ -34,6 +35,7 @@ from autogluon.core.constants import BINARY, MULTICLASS, PROBLEM_TYPES_CLASSIFICATION, QUANTILE, REGRESSION from autogluon.core.utils import download, unzip from autogluon.tabular import TabularDataset, TabularPredictor +from autogluon.tabular.configs.hyperparameter_configs import get_hyperparameter_config PARALLEL_LOCAL_BAGGING = "parallel_local" SEQUENTIAL_LOCAL_BAGGING = "sequential_local" @@ -47,7 +49,7 @@ def test_tabular(): subsample_size = None hyperparameter_tune_kwargs = None verbosity = 2 # how much output to print - hyperparameters = None + hyperparameters = get_hyperparameter_config("default") time_limit = None fast_benchmark = True # False # If True, run a faster benchmark (subsample training sets, less epochs, etc), @@ -59,6 +61,10 @@ def test_tabular(): subsample_size = 100 time_limit = 60 + # Catboost > 1.2 is required for python 3.11 but cannot be correctly installed on macos + if sys.version_info >= (3, 11) and sys.platform == "darwin": + hyperparameters.pop("CAT") + fit_args = {"verbosity": verbosity} if hyperparameter_tune_kwargs is not None: fit_args["hyperparameter_tune_kwargs"] = hyperparameter_tune_kwargs
Support Python 3.11

Add support for Python 3.11 in AutoGluon. Tentatively planned for AutoGluon v0.8 release in ~June 2023. Unknown if there will be blockers at this time.

- [x] Pre-req: Add Python 3.10 support (https://github.com/autogluon/autogluon/issues/2686)
- [x] https://github.com/autogluon/autogluon/pull/2739
- [x] https://github.com/catboost/catboost/issues/2213
- [x] Note: Latest CatBoost seems to break on MacOS when Python != 3.11, need to investigate.
- [x] ray 3.11 support
- [x] torchtext 3.11 support
- [x] torchvision 3.11 support
`torch` must be updated to `>=1.13.1`. Not yet supported: `ray`, `catboost`, `torchtext`, `torchvision`.

CatBoost plans to support Python 3.11 soon, likely in the next few months: https://github.com/catboost/catboost/issues/2213

torch is done: https://github.com/autogluon/autogluon/pull/2739

are there any specific reasons, apart from its dependencies, to enforce the upper python version in the autogluon packages?

if one were to try to install autogluon on cp311, and autogluon did not have this upper cap, pip would simply fail on one of the dependencies not supporting cp311. should be fine right?

> torch is done: #2739

Thanks for notifying!

> are there any specific reasons, apart from its dependencies, to enforce the upper python version in the autogluon packages?
>
> if one were to try to install autogluon on cp311, and autogluon did not have this upper cap, pip would simply fail on one of the dependencies not supporting cp311. should be fine right?

It is better to avoid confusion from users if we don't actually support 3.11. To have it crash on one of the inner dependencies' install is equivalent to AutoGluon succeeding install and then crashing on import due to incompatible dependencies. In both cases it misleads the user into thinking it is somehow their fault for the error. I think it is good we maintain an upper python version cap, and expand the cap once we support a new version.

`ray~=2.3` [ships](https://pypi.org/project/ray/2.3.0/#files) cp311
`torchtext~=0.15` [ships](https://download.pytorch.org/whl/torchtext/) cp311
`torchvision~=0.15` [ships](https://download.pytorch.org/whl/torchvision/) cp311

the latter two require `torch==2.0.0`:

```
Metadata-Version: 2.1
Name: torchvision
Version: 0.15.1
Summary: image and video datasets and models for torch deep learning
Home-page: https://github.com/pytorch/vision
Author: PyTorch Core Team
Author-email: [email protected]
License: BSD
Requires-Python: >=3.8
License-File: LICENSE
Requires-Dist: numpy
Requires-Dist: requests
Requires-Dist: torch (==2.0.0)
Requires-Dist: pillow (!=8.3.*,>=5.3.0)
Provides-Extra: scipy
Requires-Dist: scipy ; extra == 'scipy'
```

this also implies a CUDA version bump to `cu117` or `cu118`. there's also CUDA 12 support in the pipeline: https://github.com/pytorch/pytorch/issues/91122 cc @sxjscience

CatBoost 1.2 released today, adding Python 3.11 support: https://github.com/catboost/catboost/releases/tag/v1.2

@ddelange `torch==2.0.0` would be a milestone on its own

there's quite a chance that pt 2.0 will 'just work'. there's now `zero_grad(set_to_none=True)` (default used to be False), for the rest the changes [look](https://github.com/pytorch/pytorch/releases/tag/v2.0.0) all good to me

torchmetrics pt 2.0 support is merged but unreleased: https://github.com/Lightning-AI/torchmetrics/commit/95599c9a7bd5e6c8f0a8039562dbbc832b434031

torch lightning supports pt 2.0 as of v2.0.0: https://github.com/Lightning-AI/lightning/commit/ef2a6088ff5bc515be27e24f5f6c8b16b96d3bb8
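For context, a condensed sketch of the environment guard this PR threads through the tests and default configs. It mirrors the `test_tabular.py` change in the test patch above; dropping the `"CAT"` entry is the actual mechanism used to sidestep the missing CatBoost wheel on macOS + Python 3.11:

```python
import sys

from autogluon.tabular.configs.hyperparameter_configs import get_hyperparameter_config

hyperparameters = get_hyperparameter_config("default")

# CatBoost >= 1.2 is required for Python 3.11 but cannot be correctly
# installed on macOS, so remove it from the default model config there.
if sys.version_info >= (3, 11) and sys.platform == "darwin":
    hyperparameters.pop("CAT")
```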
2023-05-02T13:38:18Z
[]
[]
autogluon/autogluon
3,236
autogluon__autogluon-3236
[ "3036" ]
41e80518ae1cc366a155e2b40dd5abb0129412c6
diff --git a/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py b/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py --- a/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py +++ b/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py @@ -2,6 +2,7 @@ import copy import itertools +import logging from collections.abc import Iterable from typing import Any, List, Optional, Tuple, Type @@ -12,6 +13,8 @@ from autogluon.common.loaders import load_pd +logger = logging.getLogger(__name__) + ITEMID = "item_id" TIMESTAMP = "timestamp" @@ -323,11 +326,17 @@ def _construct_pandas_frame_from_data_frame( df = df.copy() if id_column is not None: - assert id_column in df.columns, f"Column {id_column} not found!" + assert id_column in df.columns, f"Column '{id_column}' not found!" + if id_column != ITEMID and ITEMID in df.columns: + logger.warning(f"Renaming existing column '{ITEMID}' -> '__{ITEMID}' to avoid name collisions.") + df.rename(columns={ITEMID: "__" + ITEMID}, inplace=True) df.rename(columns={id_column: ITEMID}, inplace=True) if timestamp_column is not None: - assert timestamp_column in df.columns, f"Column {timestamp_column} not found!" + assert timestamp_column in df.columns, f"Column '{timestamp_column}' not found!" + if timestamp_column != TIMESTAMP and TIMESTAMP in df.columns: + logger.warning(f"Renaming existing column '{TIMESTAMP}' -> '__{TIMESTAMP}' to avoid name collisions.") + df.rename(columns={TIMESTAMP: "__" + TIMESTAMP}, inplace=True) df.rename(columns={timestamp_column: TIMESTAMP}, inplace=True) if TIMESTAMP in df.columns: diff --git a/timeseries/src/autogluon/timeseries/predictor.py b/timeseries/src/autogluon/timeseries/predictor.py --- a/timeseries/src/autogluon/timeseries/predictor.py +++ b/timeseries/src/autogluon/timeseries/predictor.py @@ -204,15 +204,10 @@ def _check_and_prepare_data_frame(self, df: Union[TimeSeriesDataFrame, pd.DataFr ) if self.ignore_time_index: df = df.get_reindexed_view(freq="S") - timestamps = df.reset_index(level=TIMESTAMP)[TIMESTAMP] - is_sorted = timestamps.groupby(level=ITEMID, sort=False).apply(lambda x: x.is_monotonic_increasing).all() - if not is_sorted: - warnings.warn( - "Provided data contains timestamps that are not sorted chronologically. " - "This will lead to TimeSeriesPredictor not working as intended. " - "Please make sure that the timestamps are sorted in increasing order for all time series." - ) - # TODO: Make sure that entries for each item_id are contiguous -> https://github.com/autogluon/autogluon/issues/3036 + # MultiIndex.is_monotonic_increasing checks if index is sorted by ["item_id", "timestamp"] + if not df.index.is_monotonic_increasing: + df = df.sort_index() + df._cached_freq = None # in case frequency was incorrectly cached as IRREGULAR_TIME_INDEX_FREQSTR if df.freq is None: raise ValueError( "Frequency not provided and cannot be inferred. 
This is often due to the " @@ -623,8 +618,11 @@ def predict( """ if random_seed is not None: set_random_seed(random_seed) + # Don't use data.item_ids in case data is not a TimeSeriesDataFrame + original_item_id_order = data.reset_index()[ITEMID].unique() data = self._check_and_prepare_data_frame(data) - return self._learner.predict(data, known_covariates=known_covariates, model=model) + predictions = self._learner.predict(data, known_covariates=known_covariates, model=model) + return predictions.reindex(original_item_id_order, level=ITEMID) def evaluate(self, data: Union[TimeSeriesDataFrame, pd.DataFrame], **kwargs): """Evaluate the performance for given dataset, computing the score determined by ``self.eval_metric``
diff --git a/timeseries/tests/smoketests/test_features_and_covariates.py b/timeseries/tests/smoketests/test_features_and_covariates.py --- a/timeseries/tests/smoketests/test_features_and_covariates.py +++ b/timeseries/tests/smoketests/test_features_and_covariates.py @@ -105,7 +105,9 @@ def test_predictor_smoke_test( known_covariates = test_data.slice_by_timestep(-prediction_length, None)[known_covariates_names] predictions = predictor.predict(train_data, known_covariates=known_covariates) - # Handle the case when ignore_time_index=True - future_test_data = predictor._check_and_prepare_data_frame(test_data).slice_by_timestep(-prediction_length, None) + + if ignore_time_index: + test_data = test_data.get_reindexed_view(freq="S") + future_test_data = test_data.slice_by_timestep(-prediction_length, None) assert predictions.index.equals(future_test_data.index) diff --git a/timeseries/tests/unittests/test_predictor.py b/timeseries/tests/unittests/test_predictor.py --- a/timeseries/tests/unittests/test_predictor.py +++ b/timeseries/tests/unittests/test_predictor.py @@ -585,6 +585,16 @@ def test_given_data_cannot_be_interpreted_as_tsdf_then_exception_raised(temp_mod predictor.fit(df, hyperparameters={"Naive": {}}) +def test_given_data_is_not_sorted_then_predictor_can_fit_and_predict(temp_model_path): + shuffled_df = pd.DataFrame(DUMMY_TS_DATAFRAME).sample(frac=1.0) + ts_df = TimeSeriesDataFrame(shuffled_df) + + predictor = TimeSeriesPredictor(path=temp_model_path, prediction_length=2) + predictor.fit(ts_df, hyperparameters={"Naive": {}}) + predictions = predictor.predict(ts_df) + assert len(predictions) == predictor.prediction_length * ts_df.num_items + + def test_when_both_argument_aliases_are_passed_to_init_then_exception_is_raised(temp_model_path): with pytest.raises(ValueError, match="Please specify at most one of these arguments"): predictor = TimeSeriesPredictor(path=temp_model_path, target="custom_target", label="custom_target") diff --git a/timeseries/tests/unittests/test_ts_dataset.py b/timeseries/tests/unittests/test_ts_dataset.py --- a/timeseries/tests/unittests/test_ts_dataset.py +++ b/timeseries/tests/unittests/test_ts_dataset.py @@ -832,3 +832,17 @@ def test_when_dropna_called_then_missing_values_are_dropped(): df.iloc[[1, 5, 10, 14, 22]] = np.nan df_dropped = df.dropna() assert not df_dropped.isna().any().any() + + +def test_when_data_contains_item_id_column_that_is_unused_then_column_is_renamed(): + df = SAMPLE_DATAFRAME.copy() + df["custom_id"] = df[ITEMID] + ts_df = TimeSeriesDataFrame.from_data_frame(df, id_column="custom_id") + assert f"__{ITEMID}" in ts_df.columns + + +def test_when_data_contains_timestamp_column_that_is_unused_then_column_is_renamed(): + df = SAMPLE_DATAFRAME.copy() + df["custom_timestamp"] = df[TIMESTAMP] + ts_df = TimeSeriesDataFrame.from_data_frame(df, timestamp_column="custom_timestamp") + assert f"__{TIMESTAMP}" in ts_df.columns
[BUG] [timeseries] TimeSeriesEvaluator fails if rows of the input data are not grouped by item_id

- [x] I have checked that this bug exists on the latest stable version of AutoGluon
- [x] and/or I have checked that this bug exists on the latest mainline of AutoGluon via source installation

**Describe the bug**
All models fail during training if the rows of input data are not grouped by item id.

**Expected behavior**
The models shouldn't fail.

**To Reproduce**

```python
import pandas as pd
from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

df = TimeSeriesDataFrame(pd.DataFrame(
    {
        "item_id": ["A", "B", "A", "B", "A", "B", "A", "B", "A", "B"],
        "timestamp": ["2020", "2020", "2021", "2021", "2022", "2022", "2023", "2023", "2024", "2024"],
        "target": list(range(10)),
    }
))

# No models will be trained
predictor = TimeSeriesPredictor(prediction_length=2).fit(df)

# Works as expected
predictor = TimeSeriesPredictor(prediction_length=2).fit(df.sort_index())
```

**Installed Versions**
Which version of AutoGluon are you using? If you are using 0.4.0 and newer, please run the following code snippet:

<details>

```python
# Replace this code with the output of the following:
from autogluon.core.utils import show_versions
show_versions()
```

</details>
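The heart of the fix, restated as a small sketch: a pandas MultiIndex over `(item_id, timestamp)` reports `is_monotonic_increasing` only when rows are sorted by both levels, so the predictor can detect and repair unsorted input instead of merely warning. The helper name `ensure_sorted` is ours; in the patch this logic lives inside `_check_and_prepare_data_frame`:

```python
from autogluon.timeseries import TimeSeriesDataFrame

def ensure_sorted(df: TimeSeriesDataFrame) -> TimeSeriesDataFrame:
    # A MultiIndex is monotonic increasing exactly when rows are sorted
    # by item_id and then timestamp.
    if not df.index.is_monotonic_increasing:
        df = df.sort_index()
        # Frequency may have been cached as irregular on the unsorted view.
        df._cached_freq = None
    return df
```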
2023-05-24T15:06:48Z
[]
[]
autogluon/autogluon
3,240
autogluon__autogluon-3240
[ "2685" ]
1349d01fe4506e4fe450a1e3bf2433e8fa270abe
diff --git a/core/src/autogluon/core/problem_type.py b/core/src/autogluon/core/problem_type.py
new file mode 100644
--- /dev/null
+++ b/core/src/autogluon/core/problem_type.py
@@ -0,0 +1,86 @@
+from typing import Dict, List
+
+from .constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE
+
+__all__ = ['problem_type_info']
+
+
+# Note to developers: This is a free-form class. If you need additional parameters, add them.
+class ProblemType:
+    """
+    Simple class that holds information on what a problem type is capable of doing.
+
+    Parameters
+    ----------
+    can_predict : bool
+        Whether models for this problem type have the ability to predict via `model.predict(...)`.
+    can_predict_proba : bool
+        Whether models for this problem type have the ability to predict probabilities via `model.predict_proba(...)`.
+    is_classification : bool
+        Whether this is considered a classification problem type.
+        For example:
+        `binary`, `multiclass`, and `softclass` are considered classification problem types.
+        `regression` and `quantile` are not considered classification problem types.
+    """
+    def __init__(self,
+                 can_predict: bool,
+                 can_predict_proba: bool,
+                 is_classification: bool):
+        self.can_predict = can_predict
+        self.can_predict_proba = can_predict_proba
+        self.is_classification = is_classification
+
+
+class ProblemTypeInfo:
+    """Class that stores all problem_type information, and can vend this information via the provided methods."""
+    def __init__(self, problem_type_dict: Dict[str, ProblemType]):
+        self.problem_type_dict = problem_type_dict
+
+    def list_problem_types(self):
+        return list(self.problem_type_dict.keys())
+
+    def can_predict(self, problem_type: str) -> bool:
+        return self._get_problem_type(problem_type).can_predict
+
+    def can_predict_proba(self, problem_type: str) -> bool:
+        return self._get_problem_type(problem_type).can_predict_proba
+
+    def is_classification(self, problem_type: str) -> bool:
+        return self._get_problem_type(problem_type).is_classification
+
+    def _get_problem_type(self, problem_type: str) -> ProblemType:
+        return self.problem_type_dict[problem_type]
+
+    def list_classification(self) -> List[str]:
+        return [name for name, problem_type in self.problem_type_dict.items() if problem_type.is_classification]
+
+
+problem_type_info = ProblemTypeInfo(
+    problem_type_dict={
+        BINARY: ProblemType(
+            can_predict=True,
+            can_predict_proba=True,
+            is_classification=True,
+        ),
+        MULTICLASS: ProblemType(
+            can_predict=True,
+            can_predict_proba=True,
+            is_classification=True,
+        ),
+        SOFTCLASS: ProblemType(
+            can_predict=True,
+            can_predict_proba=True,
+            is_classification=True,
+        ),
+        REGRESSION: ProblemType(
+            can_predict=True,
+            can_predict_proba=False,
+            is_classification=False,
+        ),
+        QUANTILE: ProblemType(
+            can_predict=True,
+            can_predict_proba=False,
+            is_classification=False,
+        ),
+    }
+)
diff --git a/tabular/src/autogluon/tabular/predictor/predictor.py b/tabular/src/autogluon/tabular/predictor/predictor.py
--- a/tabular/src/autogluon/tabular/predictor/predictor.py
+++ b/tabular/src/autogluon/tabular/predictor/predictor.py
@@ -22,6 +22,7 @@
 from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT, PSEUDO_MODEL_SUFFIX, PROBLEM_TYPES_CLASSIFICATION
 from autogluon.core.data.label_cleaner import LabelCleanerMulticlassToBinary
 from autogluon.core.dataset import TabularDataset
+from autogluon.core.problem_type import problem_type_info
 from autogluon.core.pseudolabeling.pseudolabeling import filter_pseudo, 
filter_ensemble_pseudo from autogluon.core.scheduler.scheduler_factory import scheduler_factory from autogluon.core.trainer import AbstractTrainer @@ -1157,7 +1158,7 @@ def _run_pseudolabeling(self, unlabeled_data: pd.DataFrame, max_iter: int, Maximum allowed number of iterations, where in each iteration, the data are pseudolabeled by the current predictor and the predictor is refit including the pseudolabled data in its training set. return_pred_proba: bool, default = False - Transductive learning setting, will return predictive probabiliteis of unlabeled_data + Transductive learning setting, will return predictive probabilities of unlabeled_data use_ensemble: bool, default = False If True will use ensemble pseudo labeling algorithm if False will use best model pseudo labeling method @@ -1199,8 +1200,12 @@ def _run_pseudolabeling(self, unlabeled_data: pd.DataFrame, max_iter: int, test_pseudo_idxes_true, y_pred = filter_ensemble_pseudo(predictor=self, unlabeled_data=X_test) y_pred_proba = y_pred.copy() else: - y_pred_proba = self.predict_proba(data=X_test, as_multiclass=True) - y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type) + if self.can_predict_proba: + y_pred_proba = self.predict_proba(data=X_test, as_multiclass=True) + y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type) + else: + y_pred = self.predict(data=X_test) + y_pred_proba = y_pred test_pseudo_idxes_true = filter_pseudo(y_pred_proba_og=y_pred_proba, problem_type=self.problem_type) if return_pred_prob: @@ -1245,7 +1250,10 @@ def _run_pseudolabeling(self, unlabeled_data: pd.DataFrame, max_iter: int, if fit_ensemble and not fit_ensemble_every_iter: self._fit_weighted_ensemble_pseudo() - y_pred_proba_og = self.predict_proba(unlabeled_data) + if self.can_predict_proba: + y_pred_proba_og = self.predict_proba(unlabeled_data) + else: + y_pred_proba_og = self.predict(unlabeled_data) if return_pred_prob: return self, y_pred_proba_og @@ -1338,7 +1346,7 @@ def fit_pseudolabel(self, pseudo_data: pd.DataFrame, max_iter: int = 5, return_p self.fit_weighted_ensemble() if return_pred_prob: - y_pred_proba = self.predict_proba(pseudo_data) + y_pred_proba = self.predict_proba(pseudo_data) if self.can_predict_proba else self.predict(pseudo_data) return self, y_pred_proba else: return self @@ -1378,11 +1386,10 @@ def predict(self, data, model=None, as_pandas=True, transform_features=True): data = self.__get_dataset(data) return self._learner.predict(X=data, model=model, as_pandas=as_pandas, transform_features=transform_features) - # TODO: v0.8: Error if called with self.problem_type='regression' or 'quantile' def predict_proba(self, data, model=None, as_pandas=True, as_multiclass=True, transform_features=True): """ Use trained models to produce predicted class probabilities rather than class-labels (if task is classification). - If `predictor.problem_type` is regression, this functions identically to `predict`, returning the same output. + If `predictor.problem_type` is regression or quantile, this will raise an AssertionError. Parameters ---------- @@ -1415,26 +1422,13 @@ def predict_proba(self, data, model=None, as_pandas=True, as_multiclass=True, tr For binary classification problems, the output contains for each datapoint the predicted probabilities of the negative and positive classes, unless you specify `as_multiclass=False`. 
""" self._assert_is_fit('predict_proba') - data = self.__get_dataset(data) if not self.can_predict_proba: - warnings.warn( - f'Calling `predictor.predict_proba` when problem_type={self.problem_type} will raise an AssertionError starting in AutoGluon v0.8. ' - 'Please call `predictor.predict` instead. You can check the value of `predictor.can_predict_proba` to tell if predict_proba is valid.', - category=FutureWarning - ) + raise AssertionError(f'`predictor.predict_proba` is not supported when problem_type="{self.problem_type}". ' + f'Please call `predictor.predict` instead. ' + f'You can check the value of `predictor.can_predict_proba` to tell if predict_proba is valid.') + data = self.__get_dataset(data) return self._learner.predict_proba(X=data, model=model, as_pandas=as_pandas, as_multiclass=as_multiclass, transform_features=transform_features) - # TODO: Ensure this is correct as new problem_types are added. - # Consider making problem_type a class object to be able to look this up easier. - @property - def _is_classification(self) -> bool: - """ - Return True if problem_type is classification, otherwise return False. - Raises an AssertionError if `self.problem_type` is None. - """ - assert self.problem_type is not None, "problem_type cannot be None when determining if the predictor is solving a classification problem" - return self.problem_type not in [REGRESSION, QUANTILE] - @property def can_predict_proba(self) -> bool: """ @@ -1442,7 +1436,7 @@ def can_predict_proba(self) -> bool: Raises an AssertionError if called before fitting. """ self._assert_is_fit('can_predict_proba') - return self._is_classification + return problem_type_info.can_predict_proba(problem_type=self.problem_type) def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict: """ @@ -1704,6 +1698,10 @@ def predict_proba_multi(self, Dictionary with model names as keys and model prediction probabilities as values. """ self._assert_is_fit('predict_proba_multi') + if not self.can_predict_proba: + raise AssertionError(f'`predictor.predict_proba_multi` is not supported when problem_type="{self.problem_type}". ' + f'Please call `predictor.predict_multi` instead. ' + f'You can check the value of `predictor.can_predict_proba` to tell if predict_proba_multi is valid.') data = self.__get_dataset(data, allow_nan=True) return self._learner.predict_proba_multi(X=data, models=models,
diff --git a/tabular/tests/conftest.py b/tabular/tests/conftest.py --- a/tabular/tests/conftest.py +++ b/tabular/tests/conftest.py @@ -168,9 +168,14 @@ def fit_and_validate_dataset(dataset_name, if sample_size is not None and sample_size < len(test_data): test_data = test_data.sample(n=sample_size, random_state=0) predictor.predict(test_data) - pred_proba = predictor.predict_proba(test_data) predictor.evaluate(test_data) - predictor.evaluate_predictions(y_true=test_data[label], y_pred=pred_proba) + + if predictor.can_predict_proba: + pred_proba = predictor.predict_proba(test_data) + predictor.evaluate_predictions(y_true=test_data[label], y_pred=pred_proba) + else: + with pytest.raises(AssertionError): + predictor.predict_proba(test_data) model_names = predictor.get_model_names() model_name = model_names[0] @@ -181,7 +186,8 @@ def fit_and_validate_dataset(dataset_name, refit_model_name = refit_model_names[model_name] assert '_FULL' in refit_model_name predictor.predict(test_data, model=refit_model_name) - predictor.predict_proba(test_data, model=refit_model_name) + if predictor.can_predict_proba: + predictor.predict_proba(test_data, model=refit_model_name) predictor.info() predictor.leaderboard(test_data, extra_info=True, extra_metrics=extra_metrics) @@ -216,7 +222,7 @@ def fit_and_validate_dataset_with_cascade(dataset_name, shutil.rmtree(predictor.path, ignore_errors=True) # Delete AutoGluon output directory to ensure runs' information has been removed. @staticmethod - def fit_dataset(train_data, init_args, fit_args, sample_size=None): + def fit_dataset(train_data, init_args, fit_args, sample_size=None) -> TabularPredictor: if sample_size is not None and sample_size < len(train_data): train_data = train_data.sample(n=sample_size, random_state=0) return TabularPredictor(**init_args).fit(train_data, **fit_args) diff --git a/tabular/tests/unittests/test_tabular.py b/tabular/tests/unittests/test_tabular.py --- a/tabular/tests/unittests/test_tabular.py +++ b/tabular/tests/unittests/test_tabular.py @@ -70,15 +70,18 @@ def test_tabular(): def _assert_predict_dict_identical_to_predict(predictor, data): """Assert that predict_proba_dict and predict_dict are identical to looping calls to predict and predict_proba""" - predict_proba_dict = predictor.predict_proba_multi(data=data) predict_dict = predictor.predict_multi(data=data) - assert set(predictor.get_model_names()) == set(predict_proba_dict.keys()) assert set(predictor.get_model_names()) == set(predict_dict.keys()) for m in predictor.get_model_names(): model_pred = predictor.predict(data, model=m) - model_pred_proba = predictor.predict_proba(data, model=m) assert model_pred.equals(predict_dict[m]) - assert model_pred_proba.equals(predict_proba_dict[m]) + + if predictor.can_predict_proba: + predict_proba_dict = predictor.predict_proba_multi(data=data) + assert set(predictor.get_model_names()) == set(predict_proba_dict.keys()) + for m in predictor.get_model_names(): + model_pred_proba = predictor.predict_proba(data, model=m) + assert model_pred_proba.equals(predict_proba_dict[m]) def test_advanced_functionality(): @@ -145,9 +148,12 @@ def test_advanced_functionality(): y_pred = predictor.predict(test_data) y_pred_from_transform = predictor.predict(test_data_transformed, transform_features=False) assert y_pred.equals(y_pred_from_transform) - y_pred_proba = predictor.predict_proba(test_data) - y_pred_proba_from_transform = predictor.predict_proba(test_data_transformed, transform_features=False) - assert 
y_pred_proba.equals(y_pred_proba_from_transform) + + y_pred_proba = None + if predictor.can_predict_proba: + y_pred_proba = predictor.predict_proba(test_data) + y_pred_proba_from_transform = predictor.predict_proba(test_data_transformed, transform_features=False) + assert y_pred_proba.equals(y_pred_proba_from_transform) assert predictor.get_model_names_persisted() == [] # Assert that no models were persisted during training assert predictor.unpersist_models() == [] # Assert that no models were unpersisted @@ -182,8 +188,11 @@ def test_advanced_functionality(): path_clone = predictor.clone(path=predictor.path[:-1] + '_clone' + os.path.sep) predictor_clone = TabularPredictor.load(path_clone) assert predictor.path != predictor_clone.path - y_pred_proba_clone = predictor_clone.predict_proba(test_data) - assert y_pred_proba.equals(y_pred_proba_clone) + if predictor_clone.can_predict_proba: + y_pred_proba_clone = predictor_clone.predict_proba(test_data) + assert y_pred_proba.equals(y_pred_proba_clone) + y_pred_clone = predictor_clone.predict(test_data) + assert y_pred.equals(y_pred_clone) leaderboard_clone = predictor_clone.leaderboard(data=test_data) assert len(leaderboard) == len(leaderboard_clone) @@ -196,8 +205,11 @@ def test_advanced_functionality(): assert path_clone_for_deployment == path_clone_for_deployment_og predictor_clone_for_deployment = TabularPredictor.load(path_clone_for_deployment) assert predictor.path != predictor_clone_for_deployment.path - y_pred_proba_clone_for_deployment = predictor_clone_for_deployment.predict_proba(test_data) - assert y_pred_proba.equals(y_pred_proba_clone_for_deployment) + if predictor_clone_for_deployment.can_predict_proba: + y_pred_proba_clone_for_deployment = predictor_clone_for_deployment.predict_proba(test_data) + assert y_pred_proba.equals(y_pred_proba_clone_for_deployment) + y_pred_clone_for_deployment = predictor_clone_for_deployment.predict(test_data) + assert y_pred.equals(y_pred_clone_for_deployment) leaderboard_clone_for_deployment = predictor_clone_for_deployment.leaderboard(data=test_data) assert len(leaderboard) >= len(leaderboard_clone_for_deployment) # Raise exception due to lacking fit artifacts @@ -229,8 +241,11 @@ def test_advanced_functionality(): raise AssertionError('predictor.predict should raise exception after all models are deleted') # Assert that clone is not impacted by changes to original assert len(predictor_clone.leaderboard(data=test_data)) == len(leaderboard_clone) - y_pred_proba_clone_2 = predictor_clone.predict_proba(data=test_data) - assert y_pred_proba.equals(y_pred_proba_clone_2) + if predictor_clone.can_predict_proba: + y_pred_proba_clone_2 = predictor_clone.predict_proba(data=test_data) + assert y_pred_proba.equals(y_pred_proba_clone_2) + y_pred_clone_2 = predictor_clone.predict(data=test_data) + assert y_pred.equals(y_pred_clone_2) print('Tabular Advanced Functionality Test Succeeded.')
[v0.8] Tabular: Throw Exception when calling predict_proba in regression

We should throw an exception when predict_proba is called in regression instead of returning the same output as predict.

PR to add FutureWarning: #2684
PR to raise exception (v0.8): #3240
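A small sketch of the calling pattern this change enforces, mirroring the updated `conftest.py` and `fit_pseudolabel` logic in the patch; the wrapper function name is ours:

```python
def predict_safely(predictor, data):
    # can_predict_proba is False for regression/quantile problem types;
    # calling predict_proba there now raises an AssertionError instead of
    # silently returning the same output as predict.
    if predictor.can_predict_proba:
        return predictor.predict_proba(data)
    return predictor.predict(data)
```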
2023-05-25T20:57:06Z
[]
[]
autogluon/autogluon
3,286
autogluon__autogluon-3286
[ "2429" ]
e9dcdf0c9cd7fe784949a7816b89fd25a4a4341a
diff --git a/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py b/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py --- a/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py +++ b/tabular/src/autogluon/tabular/models/xgboost/xgboost_model.py @@ -25,6 +25,7 @@ class XGBoostModel(AbstractModel): """ def __init__(self, **kwargs): super().__init__(**kwargs) + self._ohe: bool = True self._ohe_generator = None self._xgb_model_type = None @@ -54,13 +55,13 @@ def get_eval_metric(self): def _preprocess(self, X, is_train=False, max_category_levels=None, **kwargs): X = super()._preprocess(X=X, **kwargs) - if self._ohe_generator is None: - self._ohe_generator = xgboost_utils.OheFeatureGenerator(max_levels=max_category_levels) - if is_train: - self._ohe_generator.fit(X) + if self._ohe: + self._ohe_generator = xgboost_utils.OheFeatureGenerator(max_levels=max_category_levels) + self._ohe_generator.fit(X) - X = self._ohe_generator.transform(X) + if self._ohe: + X = self._ohe_generator.transform(X) return X @@ -83,6 +84,13 @@ def _fit(self, if num_cpus: params['n_jobs'] = num_cpus max_category_levels = params.pop('proc.max_category_levels', 100) + enable_categorical = params.get('enable_categorical', False) + if enable_categorical: + """Skip one-hot-encoding and pass categoricals directly to XGBoost""" + self._ohe = False + else: + """One-hot-encode categorical features""" + self._ohe = True if verbosity <= 2: verbose = False
diff --git a/tabular/tests/unittests/models/test_xgboost.py b/tabular/tests/unittests/models/test_xgboost.py
--- a/tabular/tests/unittests/models/test_xgboost.py
+++ b/tabular/tests/unittests/models/test_xgboost.py
@@ -24,3 +24,11 @@ def test_xgboost_regression(fit_helper):
     )
     dataset_name = 'ames'
     fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
+
+
+def test_xgboost_binary_enable_categorical(fit_helper):
+    fit_args = dict(
+        hyperparameters={XGBoostModel: {'enable_categorical': True}},
+    )
+    dataset_name = 'adult'
+    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
Test XGBoost Categorical Support

XGBoost recently added native categorical feature support.

> XGBoost now [supports the "optimal partitioning" algorithm used by LightGBM](https://xgboost.readthedocs.io/en/stable/tutorials/categorical.html#optimal-partitioning) as opposed to requiring one-hot-encoding.

We should test how it performs compared to the one-hot-encoding we currently use.
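A sketch of how the patch above is exercised, per the new unit test: setting the `enable_categorical` hyperparameter skips AutoGluon's one-hot-encoding and hands categorical features to XGBoost natively. The training file path here is a placeholder:

```python
from autogluon.tabular import TabularPredictor
from autogluon.tabular.models.xgboost.xgboost_model import XGBoostModel

# 'train.csv' is a placeholder path; the unit test above uses the 'adult' dataset.
predictor = TabularPredictor(label='class').fit(
    train_data='train.csv',
    hyperparameters={
        XGBoostModel: {'enable_categorical': True},  # sets self._ohe = False internally
    },
)
```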
2023-06-06T23:10:41Z
[]
[]
autogluon/autogluon
3,298
autogluon__autogluon-3298
[ "1038" ]
95f3caa7732047f581e00f6f557b0873731c6d32
diff --git a/core/src/autogluon/core/calibrate/__init__.py b/core/src/autogluon/core/calibrate/__init__.py --- a/core/src/autogluon/core/calibrate/__init__.py +++ b/core/src/autogluon/core/calibrate/__init__.py @@ -0,0 +1 @@ +from ._decision_threshold import calibrate_decision_threshold diff --git a/core/src/autogluon/core/calibrate/_decision_threshold.py b/core/src/autogluon/core/calibrate/_decision_threshold.py new file mode 100644 --- /dev/null +++ b/core/src/autogluon/core/calibrate/_decision_threshold.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +import logging +from typing import Callable, List, Union + +import numpy as np + +from ..constants import BINARY +from ..metrics import Scorer +from ..utils import get_pred_from_proba + +logger = logging.getLogger(__name__) + + +# TODO: docstring +# TODO: Can use a smarter search strategy than brute force for faster speed, such as bayes opt. +def calibrate_decision_threshold(y: np.array, + y_pred_proba: np.array, + metric: Union[Callable, Scorer], + metric_kwargs: dict | None = None, + decision_thresholds: Union[int, List[float]] = 50, + metric_name: str | None = None, + verbose: bool = True) -> float: + problem_type = BINARY + assert len(y_pred_proba.shape) == 1 + assert len(y.shape) == 1 + assert len(y) == len(y_pred_proba) + + if metric_kwargs is None: + metric_kwargs = dict() + + if isinstance(metric, Scorer): + if metric_name is None: + metric_name = metric.name + if not metric.needs_pred: + logger.warning(f'WARNING: The provided metric "{metric_name}" does not use class predictions for scoring, ' + f'and thus is invalid for decision threshold calibration. ' + f'Falling back to `decision_threshold=0.5`.') + return 0.5 + metric_name_log = f' {metric_name}' if metric_name is not None else '' + + if isinstance(decision_thresholds, int): + # Order thresholds by their proximity to 0.5 + num_checks_half = decision_thresholds + num_checks = num_checks_half * 2 + decision_thresholds = [[0.5]] + [[0.5 - (i / num_checks), 0.5 + (i / num_checks)] for i in + range(1, num_checks_half + 1)] + decision_thresholds = [item for sublist in decision_thresholds for item in sublist] + else: + for decision_threshold in decision_thresholds: + if decision_threshold > 1 or decision_threshold < 0: + raise ValueError(f'Invalid decision_threshold specified: {decision_threshold} |' + f' Decision thresholds must be between 0 and 1.') + best_score_val = None + best_threshold = None + + y_pred_val = get_pred_from_proba( + y_pred_proba=y_pred_proba, + problem_type=problem_type, + decision_threshold=0.5, + ) + # TODO: Avoid calling like this, re-use logic that works with weights + extra args + score_val_baseline = metric(y, y_pred_val, **metric_kwargs) + + if verbose: + logger.log(20, f'Calibrating decision threshold to optimize metric{metric_name_log} ' + f'| Checking {len(decision_thresholds)} thresholds...') + for decision_threshold in decision_thresholds: + extra_log = '' + y_pred_val = get_pred_from_proba( + y_pred_proba=y_pred_proba, + problem_type=problem_type, + decision_threshold=decision_threshold, + ) + # TODO: Avoid calling like this, re-use logic that works with weights + extra args + score_val = metric(y, y_pred_val, **metric_kwargs) + + if best_score_val is None or score_val > best_score_val: + best_threshold = decision_threshold + best_score_val = score_val + extra_log = '\t| NEW BEST' + elif best_score_val == score_val: + # If the new threshold is closer to 0.5 than the previous threshold, prioritize it. 
+ if abs(decision_threshold - 0.5) < abs(best_threshold - 0.5): + best_threshold = decision_threshold + best_score_val = score_val + extra_log = '\t| NEW BEST (Tie, using threshold that is closer to 0.5)' + + if verbose: + logger.log(15, f'\tthreshold: {decision_threshold:.3f}\t| val: {score_val:.4f}{extra_log}') + if verbose: + logger.log(20, f'\tBase Threshold: {0.5:.3f}\t| val: {score_val_baseline:.4f}') + logger.log(20, f'\tBest Threshold: {best_threshold:.3f}\t| val: {best_score_val:.4f}') + return best_threshold diff --git a/core/src/autogluon/core/trainer/abstract_trainer.py b/core/src/autogluon/core/trainer/abstract_trainer.py --- a/core/src/autogluon/core/trainer/abstract_trainer.py +++ b/core/src/autogluon/core/trainer/abstract_trainer.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import logging import os @@ -20,10 +22,12 @@ from .utils import process_hyperparameters from ..augmentation.distill_utils import format_distillation_labels, augment_data +from ..calibrate import calibrate_decision_threshold from ..calibrate.conformity_score import compute_conformity_score from ..calibrate.temperature_scaling import tune_temperature_scaling from ..constants import AG_ARGS, BINARY, MULTICLASS, REGRESSION, QUANTILE, SOFTCLASS, REFIT_FULL_NAME, REFIT_FULL_SUFFIX from ..data.label_cleaner import LabelCleanerMulticlassToBinary +from ..metrics import get_metric, Scorer from ..models import AbstractModel, BaggedEnsembleModel, StackerEnsembleModel, WeightedEnsembleModel, GreedyWeightedEnsembleModel, SimpleWeightedEnsembleModel from ..utils import default_holdout_frac, get_pred_from_proba, generate_train_test_split, infer_eval_metric, compute_permutation_feature_importance, \ extract_column, compute_weighted_metric, convert_pred_probas_to_df @@ -646,6 +650,12 @@ def score_with_y_pred_proba(self, y, y_pred_proba, weights=None) -> float: return compute_weighted_metric(y, y_pred, self.eval_metric, weights, weight_evaluation=self.weight_evaluation, quantile_levels=self.quantile_levels) + def _score_with_y_pred(self, y, y_pred, weights=None, metric=None) -> float: + if metric is None: + metric = self.eval_metric + return compute_weighted_metric(y, y_pred, metric=metric, weights=weights, weight_evaluation=self.weight_evaluation, + quantile_levels=self.quantile_levels) + # TODO: Slow if large ensemble with many models, could cache output result to speed up cascades during inference def _construct_model_pred_order(self, models: List[str]) -> List[str]: """ @@ -1185,6 +1195,10 @@ def refit_ensemble_full(self, model='all') -> dict: self.save() return self.get_model_full_dict() + def get_refit_full_parent(self, model: str) -> str: + """Get refit full model's parent. 
If model does not have a parent, return `model`.""" + return self.get_model_attribute(model=model, attribute='refit_full_parent', default=model) + # TODO: Take best performance model with lowest inference def get_model_best(self, can_infer=None, allow_full=True, infer_limit=None): models = self.get_model_names(can_infer=can_infer) @@ -2382,6 +2396,9 @@ def get_model_full_dict(self, inverse=False) -> Dict[str, str]: model_full_dict = {parent: refit for refit, parent in model_full_dict.items()} return model_full_dict + def model_exists(self, model: str) -> bool: + return model in self.get_model_names() + def _get_banned_model_names(self) -> list: """Gets all model names which would cause model files to be overwritten if a new model was trained with the name""" return self.get_model_names() + list(self._extra_banned_names) @@ -3046,3 +3063,64 @@ def calibrate_model(self, model_name: str = None, lr: float = 0.01, max_iter: in logger.log(15, f'Temperature term found is: {temp_scalar}') model.params_aux["temperature_scalar"] = temp_scalar model.save() + + def calibrate_decision_threshold(self, + X: pd.DataFrame | None = None, + y: np.array | None = None, + metric: str | Scorer | None = None, + model: str = 'best', + weights=None, + decision_thresholds: int | List[float] = 50, + verbose: bool = True) -> float: + # TODO: Docstring + assert self.problem_type == BINARY, f'calibrate_decision_threshold is only available for `problem_type="{BINARY}"`' + + if metric is None: + metric = self.eval_metric + elif isinstance(metric, str): + metric = get_metric(metric, self.problem_type, 'eval_metric') + + if model == 'best': + model = self.get_model_best() + + if y is None: + # If model is refit_full, use its parent to avoid over-fitting + model_parent = self.get_refit_full_parent(model=model) + if not self.model_exists(model_parent): + raise AssertionError(f'Unable to calibrate the decision threshold on the internal data because the ' + f'model "{model}" is a refit_full model trained on all of the internal data, ' + f'whose parent model "{model_parent}" does not exist or was deleted.\n' + f'It may have been deleted due to `predictor.fit(..., keep_only_best=True)`. ' + f'Ensure `keep_only_best=False` to be able to calibrate refit_full models.') + model = model_parent + + # TODO: Add helpful logging when data is not available, for example post optimize for deployment + if self.has_val: + # Use validation data + X = self.load_X_val() + if self.weight_evaluation: + X, weights = extract_column(X=X, col_name=self.sample_weight) + y: np.array = self.load_y_val() + y_pred_proba = self.predict_proba(X=X, model=model) + else: + # Use out-of-fold data + if self.weight_evaluation: + X = self.load_X() + X, weights = extract_column(X=X, col_name=self.sample_weight) + y: np.array = self.load_y() + y_pred_proba = self.get_model_oof(model=model) + else: + y_pred_proba = self.predict_proba(X=X, model=model) + + if not metric.needs_pred: + logger.warning(f'WARNING: The provided metric "{metric.name}" does not use class predictions for scoring, ' + f'and thus is invalid for decision threshold calibration. 
' + f'Falling back to `decision_threshold=0.5`.') + return 0.5 + + return calibrate_decision_threshold(y=y, + y_pred_proba=y_pred_proba, + metric=lambda y, y_pred : self._score_with_y_pred(y=y, y_pred=y_pred, weights=weights, metric=metric), + decision_thresholds=decision_thresholds, + metric_name=metric.name, + verbose=verbose) diff --git a/core/src/autogluon/core/utils/utils.py b/core/src/autogluon/core/utils/utils.py --- a/core/src/autogluon/core/utils/utils.py +++ b/core/src/autogluon/core/utils/utils.py @@ -4,7 +4,7 @@ import time import random import sys -from typing import Callable, List, Union, Tuple +from typing import Callable, List, Tuple, Union import numpy as np import pandas as pd @@ -253,28 +253,42 @@ def augment_rare_classes(X, label, threshold): return X -def get_pred_from_proba_df(y_pred_proba, problem_type=BINARY): - """From input DataFrame of pred_proba, return Series of pred""" +def get_pred_from_proba_df(y_pred_proba: pd.DataFrame, + problem_type: str = BINARY, + decision_threshold: float = None) -> pd.Series: + """ + From input DataFrame of pred_proba, return Series of pred. + The input DataFrame's columns must be the names of the target classes. + """ if problem_type == REGRESSION: y_pred = y_pred_proba elif problem_type == QUANTILE: y_pred = y_pred_proba + elif problem_type == BINARY and decision_threshold is not None: + negative_class, positive_class = y_pred_proba.columns + y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba.values, + problem_type=problem_type, + decision_threshold=decision_threshold) + y_pred = [positive_class if pred == 1 else negative_class for pred in y_pred] + y_pred = pd.Series(data=y_pred, index=y_pred_proba.index) else: y_pred = y_pred_proba.idxmax(axis=1) return y_pred -def get_pred_from_proba(y_pred_proba: np.ndarray, problem_type=BINARY): +def get_pred_from_proba(y_pred_proba: np.ndarray, problem_type=BINARY, decision_threshold: float = None): if problem_type == BINARY: + if decision_threshold is None: + decision_threshold = 0.5 # Using > instead of >= to align with Pandas `.idxmax` logic which picks the left-most column during ties. # If this is not done, then predictions can be inconsistent when converting in binary classification from multiclass-form pred_proba and # binary-form pred_proba when the pred_proba is 0.5 for positive and negative classes. 
if len(y_pred_proba.shape) == 2: assert y_pred_proba.shape[1] == 2 # Assume positive class is in 2nd position - y_pred = [1 if pred > 0.5 else 0 for pred in y_pred_proba[:, 1]] + y_pred = [1 if pred > decision_threshold else 0 for pred in y_pred_proba[:, 1]] else: - y_pred = [1 if pred > 0.5 else 0 for pred in y_pred_proba] + y_pred = [1 if pred > decision_threshold else 0 for pred in y_pred_proba] elif problem_type == REGRESSION: y_pred = y_pred_proba elif problem_type == QUANTILE: diff --git a/tabular/src/autogluon/tabular/learner/abstract_learner.py b/tabular/src/autogluon/tabular/learner/abstract_learner.py --- a/tabular/src/autogluon/tabular/learner/abstract_learner.py +++ b/tabular/src/autogluon/tabular/learner/abstract_learner.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import json import logging @@ -7,13 +9,13 @@ import numpy as np import pandas as pd from pandas import DataFrame, Series -from typing import List +from typing import List, Union from sklearn.metrics import classification_report from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT from autogluon.core.data.label_cleaner import LabelCleaner, LabelCleanerMulticlassToBinary from autogluon.core.learner import AbstractLearner -from autogluon.core.metrics import confusion_matrix, get_metric +from autogluon.core.metrics import confusion_matrix, get_metric, Scorer from autogluon.core.models.greedy_ensemble.ensemble_selection import EnsembleSelection from autogluon.core.utils import get_leaderboard_pareto_frontier, augment_rare_classes, extract_column, compute_weighted_metric from autogluon.core.utils import get_pred_from_proba, get_pred_from_proba_df, infer_problem_type @@ -132,7 +134,13 @@ def _fit(self, X: DataFrame, X_val: DataFrame = None, scheduler_options=None, hy feature_prune=False, holdout_frac=0.1, hyperparameters=None, verbosity=2): raise NotImplementedError - def predict_proba(self, X: DataFrame, model=None, as_pandas=True, as_multiclass=True, inverse_transform=True, transform_features=True): + def predict_proba(self, + X: DataFrame, + model: str | None = None, + as_pandas: bool = True, + as_multiclass: bool = True, + inverse_transform: bool = True, + transform_features: bool = True): X_index = copy.deepcopy(X.index) if as_pandas else None if X.empty: y_pred_proba = np.array([]) @@ -147,11 +155,23 @@ def predict_proba(self, X: DataFrame, model=None, as_pandas=True, as_multiclass= inverse_transform=inverse_transform) return y_pred_proba - def predict(self, X: DataFrame, model=None, as_pandas=True, inverse_transform=True, transform_features=True): + def predict(self, + X: DataFrame, + model: str | None = None, + as_pandas: bool = True, + inverse_transform: bool = True, + transform_features: bool = True, + *, + decision_threshold: float | None = None, + ): + if decision_threshold is None: + decision_threshold = 0.5 X_index = copy.deepcopy(X.index) if as_pandas else None y_pred_proba = self.predict_proba(X=X, model=model, as_pandas=False, as_multiclass=False, inverse_transform=False, transform_features=transform_features) problem_type = self.label_cleaner.problem_type_transform or self.problem_type - y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=problem_type) + y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, + problem_type=problem_type, + decision_threshold=decision_threshold) y_pred = self._post_process_predict(y_pred=y_pred, as_pandas=as_pandas, index=X_index, @@ -292,7 +312,9 @@ def predict_multi(self, 
models: List[str] = None, as_pandas: bool = True, transform_features: bool = True, - inverse_transform: bool = True) -> dict: + inverse_transform: bool = True, + *, + decision_threshold: float = None) -> dict: """ Identical to predict_proba_multi, except returns predictions instead of probabilities. """ @@ -302,18 +324,30 @@ def predict_multi(self, transform_features=transform_features, inverse_transform=inverse_transform) predict_dict = {} - if as_pandas: - for m in predict_proba_dict: - predict_dict[m] = get_pred_from_proba_df(predict_proba_dict[m], problem_type=self.problem_type) - else: - for m in predict_proba_dict: - y_pred = get_pred_from_proba(predict_proba_dict[m], problem_type=self.problem_type) - predict_dict[m] = self._post_process_predict(y_pred=y_pred, - as_pandas=as_pandas, - index=None, - inverse_transform=inverse_transform) + for m in predict_proba_dict: + predict_dict[m] = self.get_pred_from_proba(y_pred_proba=predict_proba_dict[m], + decision_threshold=decision_threshold, + inverse_transform=inverse_transform) return predict_dict + def get_pred_from_proba(self, + y_pred_proba: np.ndarray | pd.DataFrame, + decision_threshold: float | None = None, + inverse_transform: bool = True) -> np.array | pd.Series: + if isinstance(y_pred_proba, pd.DataFrame): + y_pred = get_pred_from_proba_df(y_pred_proba, + problem_type=self.problem_type, + decision_threshold=decision_threshold) + else: + y_pred = get_pred_from_proba(y_pred_proba, + problem_type=self.problem_type, + decision_threshold=decision_threshold) + y_pred = self._post_process_predict(y_pred=y_pred, + as_pandas=False, + index=None, + inverse_transform=inverse_transform) + return y_pred + def _validate_fit_input(self, X: DataFrame, **kwargs): if self.label not in X.columns: raise KeyError(f"Label column '{self.label}' is missing from training data. 
Training data columns: {list(X.columns)}") @@ -425,7 +459,8 @@ def score(self, X: DataFrame, y=None, model=None): return compute_weighted_metric(y, y_pred, self.eval_metric, w, weight_evaluation=self.weight_evaluation, quantile_levels=self.quantile_levels) # Scores both learner and all individual models, along with computing the optimal ensemble score + weights (oracle) - def score_debug(self, X: DataFrame, y=None, extra_info=False, compute_oracle=False, extra_metrics=None, skip_score=False, silent=False): + def score_debug(self, X: DataFrame, y=None, extra_info=False, compute_oracle=False, extra_metrics=None, + decision_threshold=None, skip_score=False, silent=False): leaderboard_df = self.leaderboard(extra_info=extra_info, silent=silent) if extra_metrics is None: extra_metrics = [] @@ -479,6 +514,7 @@ def score_debug(self, X: DataFrame, y=None, extra_info=False, compute_oracle=Fal scores[model_name] = self._score_with_pred_proba( y_pred_proba_internal=y_pred_proba_internal, metric=self.eval_metric, + decision_threshold=decision_threshold, **scoring_args ) for metric in extra_metrics: @@ -488,6 +524,7 @@ def score_debug(self, X: DataFrame, y=None, extra_info=False, compute_oracle=Fal extra_scores[metric.name][model_name] = self._score_with_pred_proba( y_pred_proba_internal=y_pred_proba_internal, metric=metric, + decision_threshold=decision_threshold, **scoring_args ) @@ -572,6 +609,7 @@ def _score_with_pred_proba(self, y_pred_proba_internal, metric, sample_weight=None, + decision_threshold=None, weight_evaluation=None): metric = get_metric(metric, self.problem_type, 'leaderboard_metric') if weight_evaluation is None: @@ -579,7 +617,9 @@ def _score_with_pred_proba(self, if metric.needs_pred: if self.problem_type == BINARY: # Use 1 and 0, otherwise f1 can crash due to unknown pos_label. - y_pred = get_pred_from_proba(y_pred_proba_internal, problem_type=self.problem_type) + y_pred = self.get_pred_from_proba(y_pred_proba_internal, + decision_threshold=decision_threshold, + inverse_transform=False) y_tmp = y_internal else: y_pred = self.label_cleaner.inverse_transform_proba(y_pred_proba_internal, as_pred=True) @@ -626,7 +666,7 @@ def _validate_class_labels(self, y: Series): # log_loss / pac_score raise ValueError(f'Multiclass scoring with eval_metric=\'{self.eval_metric.name}\' does not support unknown classes. Unknown classes: {unknown_classes}') - def evaluate_predictions(self, y_true, y_pred, sample_weight=None, silent=False, auxiliary_metrics=True, detailed_report=False): + def evaluate_predictions(self, y_true, y_pred, sample_weight=None, decision_threshold=None, silent=False, auxiliary_metrics=True, detailed_report=False): """ Evaluate predictions. Does not support sample weights since this method reports a variety of metrics. Args: silent (bool): Should we print which metric is being used as well as performance. 
@@ -657,7 +697,7 @@ def evaluate_predictions(self, y_true, y_pred, sample_weight=None, silent=False, f'which is not supported by `evaluate_predictions`.') if is_proba: y_pred_proba = y_pred - y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type) + y_pred = self.get_pred_from_proba(y_pred_proba=y_pred_proba, decision_threshold=decision_threshold) if self.problem_type == BINARY: # roc_auc crashes if this isn't done y_pred_proba = y_pred_proba[self.positive_class] @@ -723,6 +763,7 @@ def evaluate_predictions(self, y_true, y_pred, sample_weight=None, silent=False, score = self._score_with_pred_proba( y_pred_proba_internal=y_pred_proba_internal, metric=aux_metric, + decision_threshold=decision_threshold, **scoring_args ) else: @@ -771,9 +812,11 @@ def extract_label(self, X, error_if_missing=True): X = X.drop(self.label, axis=1) return X, y - def leaderboard(self, X=None, y=None, extra_info=False, extra_metrics=None, only_pareto_frontier=False, skip_score=False, silent=False): + def leaderboard(self, X=None, y=None, extra_info=False, extra_metrics=None, decision_threshold=None, + only_pareto_frontier=False, skip_score=False, silent=False) -> pd.DataFrame: if X is not None: - leaderboard = self.score_debug(X=X, y=y, extra_info=extra_info, extra_metrics=extra_metrics, skip_score=skip_score, silent=True) + leaderboard = self.score_debug(X=X, y=y, extra_info=extra_info, extra_metrics=extra_metrics, + decision_threshold=decision_threshold, skip_score=skip_score, silent=True) else: if extra_metrics: raise AssertionError('`extra_metrics` is only valid when data is specified.') @@ -895,6 +938,47 @@ def distill(self, X=None, y=None, X_val=None, y_val=None, time_limit=None, hyper self.save_trainer(trainer=trainer) return distilled_model_names + def transform_labels(self, y, inverse=False, proba=False): + if inverse: + if proba: + y_transformed = self.label_cleaner.inverse_transform_proba(y=y, as_pandas=True) + else: + y_transformed = self.label_cleaner.inverse_transform(y=y) + else: + if proba: + y_transformed = self.label_cleaner.transform_proba(y=y, as_pandas=True) + else: + y_transformed = self.label_cleaner.transform(y=y) + return y_transformed + + def calibrate_decision_threshold(self, + data: pd.DataFrame | None = None, + metric: str | Scorer | None = None, + model: str = 'best', + decision_thresholds: int | List[float] = 50, + verbose: bool = True) -> float: + # TODO: docstring + if metric is None: + metric = self.eval_metric + + weights = None + if data is None: + X = None + y = None + else: + if self.weight_evaluation: + data, weights = extract_column(data, self.sample_weight) + X = self.transform_features(X=data) + y = self.transform_labels(y=data[self.label]) + + return self.load_trainer().calibrate_decision_threshold(X=X, + y=y, + metric=metric, + model=model, + weights=weights, + decision_thresholds=decision_thresholds, + verbose=verbose) + # TODO: Add data info gathering at beginning of .fit() that is used by all learners to add to get_info output # TODO: Add feature inference / feature engineering info to get_info output def get_info(self, **kwargs): diff --git a/tabular/src/autogluon/tabular/predictor/predictor.py b/tabular/src/autogluon/tabular/predictor/predictor.py --- a/tabular/src/autogluon/tabular/predictor/predictor.py +++ b/tabular/src/autogluon/tabular/predictor/predictor.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import inspect import logging @@ -22,6 +24,7 @@ from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, 
QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT, PSEUDO_MODEL_SUFFIX, PROBLEM_TYPES_CLASSIFICATION from autogluon.core.data.label_cleaner import LabelCleanerMulticlassToBinary from autogluon.core.dataset import TabularDataset +from autogluon.core.metrics import Scorer from autogluon.core.problem_type import problem_type_info from autogluon.core.pseudolabeling.pseudolabeling import filter_pseudo, filter_ensemble_pseudo from autogluon.core.scheduler.scheduler_factory import scheduler_factory @@ -222,6 +225,7 @@ def __init__( logger.log(15, f"{AUTO_WEIGHT} currently does not use any sample weights.") self.sample_weight = sample_weight self.weight_evaluation = weight_evaluation # TODO: sample_weight and weight_evaluation can both be properties that link to self._learner.sample_weight, self._learner.weight_evaluation + self._decision_threshold = None # TODO: Each model should have its own decision threshold instead of one global threshold if self.sample_weight in [AUTO_WEIGHT, BALANCE_WEIGHT] and self.weight_evaluation: logger.warning( f"We do not recommend specifying weight_evaluation when sample_weight='{self.sample_weight}', instead specify appropriate eval_metric.") @@ -276,6 +280,40 @@ def original_features(self) -> List[str]: def problem_type(self): return self._learner.problem_type + @property + def decision_threshold(self) -> float: + """ + The decision threshold used to convert prediction probabilities to predictions. + Only relevant for binary classification, otherwise the value will be None. + Valid values are in the range [0.0, 1.0] + You can obtain an optimized `decision_threshold` by first calling `predictor.calibrate_decision_threshold()`. + Useful to set for metrics such as `balanced_accuracy` and `f1` as `0.5` is often not an optimal threshold. + Predictions are calculated via the following logic on the positive class: `1 if pred > decision_threshold else 0` + """ + if self._decision_threshold is not None: + return self._decision_threshold + elif self.problem_type == BINARY: + return 0.5 + else: + return None + + def set_decision_threshold(self, decision_threshold: float): + """ + Set `predictor.decision_threshold`. Problem type must be 'binary', and the value must be between 0 and 1. + """ + assert self.problem_type == BINARY + assert decision_threshold >= 0 + assert decision_threshold <= 1 + if decision_threshold != self.decision_threshold: + logger.log(20, f'Updating predictor.decision_threshold from {self.decision_threshold} -> {decision_threshold}\n' + f'\tThis will impact how prediction probabilities are converted to predictions in binary classification.\n' + f'\tPrediction probabilities of the positive class >{decision_threshold} ' + f'will be predicted as the positive class ({self.positive_class}). ' + f'This can significantly impact metric scores.\n' + f'\tYou can update this value via `predictor.set_decision_threshold`.\n' + f'\tYou can calculate an optimal decision threshold on the validation data via `predictor.calibrate_decision_threshold()`.') + self._decision_threshold = decision_threshold + def features(self, feature_stage: str = 'original'): """ Returns a list of feature names dependent on the value of feature_stage. @@ -324,6 +362,7 @@ def fit(self, infer_limit=None, infer_limit_batch_size=None, fit_weighted_ensemble=True, + calibrate_decision_threshold=False, num_cpus='auto', num_gpus='auto', **kwargs): @@ -570,6 +609,12 @@ def fit(self, If True, a WeightedEnsembleModel will be fit in each stack layer. 
A weighted ensemble will often be stronger than an individual model while being very fast to train. It is recommended to keep this value set to True to maximize predictive quality. + calibrate_decision_threshold : bool, default = False + [Experimental] This may be removed / changed without warning in a future release. + If True, will automatically calibrate the decision threshold at the end of fit for calls to `.predict` based on the evaluation metric. + By default, the decision threshold is `0.5`, however for some metrics such as `f1` and `balanced_accuracy`, + scores can be significantly improved by choosing a threshold other than `0.5`. + Only valid for `problem_type='binary'`. Ignored for all other problem types. num_cpus: int, default = "auto" The total amount of cpus you want AutoGluon predictor to use. Auto means AutoGluon will make the decision based on the total number of cpus available and the model requirement for best performance. @@ -900,13 +945,14 @@ def fit(self, set_best_to_refit_full=kwargs['set_best_to_refit_full'], save_space=kwargs['save_space'], calibrate=kwargs['calibrate'], + calibrate_decision_threshold=calibrate_decision_threshold, infer_limit=infer_limit, ) self.save() return self def _post_fit(self, keep_only_best=False, refit_full=False, set_best_to_refit_full=False, save_space=False, - calibrate=False, infer_limit=None): + calibrate=False, calibrate_decision_threshold=False, infer_limit=None): if refit_full is True: if keep_only_best is True: if set_best_to_refit_full is True: @@ -954,6 +1000,13 @@ def _post_fit(self, keep_only_best=False, refit_full=False, set_best_to_refit_fu else: logger.log(30, 'WARNING: `calibrate=True` is only applicable to classification or quantile regression problems. Skipping calibration...') + if calibrate_decision_threshold: + if self.problem_type != BINARY: + logger.log(30, 'WARNING: `calibrate_decision_threshold=True` is only applicable to binary classification. Skipping calibration...') + else: + best_threshold = self.calibrate_decision_threshold() + self.set_decision_threshold(decision_threshold=best_threshold) + if keep_only_best: self.delete_models(models_to_keep='best', dry_run=False) @@ -1380,7 +1433,13 @@ def fit_pseudolabel(self, pseudo_data: pd.DataFrame, max_iter: int = 5, return_p fit_ensemble=fit_ensemble, fit_ensemble_every_iter=fit_ensemble_every_iter, **fit_extra_kwargs) - def predict(self, data, model=None, as_pandas=True, transform_features=True): + def predict(self, + data: str | TabularDataset | pd.DataFrame, + model: str | None = None, + as_pandas: bool = True, + transform_features: bool = True, + *, + decision_threshold: float | None = None): """ Use trained models to produce predictions of `label` column values for new data. @@ -1399,6 +1458,14 @@ def predict(self, data, model=None, as_pandas=True, transform_features=True): If True, preprocesses data before predicting with models. If False, skips global feature preprocessing. This is useful to save on inference time if you have already called `data = predictor.transform_features(data)`. + decision_threshold : float, default = None + The decision threshold used to convert prediction probabilities to predictions. + Only relevant for binary classification, otherwise ignored. + If None, defaults to `predictor.decision_threshold`. + Valid values are in the range [0.0, 1.0] + You can obtain an optimized `decision_threshold` by first calling `predictor.calibrate_decision_threshold()`. 
+ Useful to set for metrics such as `balanced_accuracy` and `f1` as `0.5` is often not an optimal threshold. + Predictions are calculated via the following logic on the positive class: `1 if pred > decision_threshold else 0` Returns ------- @@ -1406,9 +1473,16 @@ def predict(self, data, model=None, as_pandas=True, transform_features=True): """ self._assert_is_fit('predict') data = self._get_dataset(data) - return self._learner.predict(X=data, model=model, as_pandas=as_pandas, transform_features=transform_features) + if decision_threshold is None: + decision_threshold = self.decision_threshold + return self._learner.predict(X=data, model=model, as_pandas=as_pandas, transform_features=transform_features, decision_threshold=decision_threshold) - def predict_proba(self, data, model=None, as_pandas=True, as_multiclass=True, transform_features=True): + def predict_proba(self, + data: str | TabularDataset | pd.DataFrame, + model: str | None = None, + as_pandas: bool = True, + as_multiclass: bool = True, + transform_features: bool = True): """ Use trained models to produce predicted class probabilities rather than class-labels (if task is classification). If `predictor.problem_type` is regression or quantile, this will raise an AssertionError. @@ -1451,6 +1525,46 @@ def predict_proba(self, data, model=None, as_pandas=True, as_multiclass=True, tr data = self._get_dataset(data) return self._learner.predict_proba(X=data, model=model, as_pandas=as_pandas, as_multiclass=as_multiclass, transform_features=transform_features) + def get_pred_from_proba(self, + y_pred_proba: pd.DataFrame | np.ndarray, + decision_threshold: float | None = None) -> pd.Series | np.array: + """ + Given prediction probabilities, convert to predictions. + + Parameters + ---------- + y_pred_proba : :class:`pd.DataFrame` or :class:`np.ndarray` + The prediction probabilities to convert to predictions. + Obtainable via the output of `predictor.predict_proba`. + decision_threshold : float, default = None + The decision threshold used to convert prediction probabilities to predictions. + Only relevant for binary classification, otherwise ignored. + If None, defaults to `predictor.decision_threshold`. + Valid values are in the range [0.0, 1.0] + You can obtain an optimized `decision_threshold` by first calling `predictor.calibrate_decision_threshold()`. + Useful to set for metrics such as `balanced_accuracy` and `f1` as `0.5` is often not an optimal threshold. + Predictions are calculated via the following logic on the positive class: `1 if pred > decision_threshold else 0` + + Returns + ------- + Array of predictions, one corresponding to each row in given dataset. Either :class:`np.ndarray` or :class:`pd.Series` depending on `y_pred_proba` dtype. 
+ + Examples + -------- + >>> from autogluon.tabular import TabularPredictor + >>> predictor = TabularPredictor(label='class').fit('train.csv', label='class') + >>> y_pred_proba = predictor.predict_proba('test.csv') + >>> + >>> # y_pred and y_pred_from_proba are identical + >>> y_pred = predictor.predict('test.csv') + >>> y_pred_from_proba = predictor.get_pred_from_proba(y_pred_proba=y_pred_proba) + """ + if not self.can_predict_proba: + raise AssertionError(f'`predictor.get_pred_from_proba` is not supported when problem_type="{self.problem_type}".') + if decision_threshold is None: + decision_threshold = self.decision_threshold + return self._learner.get_pred_from_proba(y_pred_proba=y_pred_proba, decision_threshold=decision_threshold) + @property def can_predict_proba(self) -> bool: """ @@ -1460,7 +1574,7 @@ def can_predict_proba(self) -> bool: self._assert_is_fit('can_predict_proba') return problem_type_info.can_predict_proba(problem_type=self.problem_type) - def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict: + def evaluate(self, data, model=None, decision_threshold=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict: """ Report the predictive performance evaluated over a given dataset. This is basically a shortcut for: `pred_proba = predict_proba(data); evaluate_predictions(data[label], pred_proba)`. @@ -1474,6 +1588,11 @@ def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detai model : str (optional) The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set. Valid models are listed in this `predictor` by calling `predictor.get_model_names()`. + decision_threshold : float, default = None + The decision threshold to use when converting prediction probabilities to predictions. + This will impact the scores of metrics such as `f1` and `accuracy`. + If None, defaults to `predictor.decision_threshold`. Ignored unless `problem_type='binary'`. + Refer to the `predictor.decision_threshold` docstring for more information. silent : bool, default = False If False, performance results are printed. auxiliary_metrics: bool, default = True @@ -1489,6 +1608,8 @@ def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detai """ self._assert_is_fit('evaluate') data = self._get_dataset(data) + if decision_threshold is None: + decision_threshold = self.decision_threshold if self.can_predict_proba: y_pred = self.predict_proba(data=data, model=model) else: @@ -1497,10 +1618,11 @@ def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detai sample_weight = data[self.sample_weight] else: sample_weight = None - return self.evaluate_predictions(y_true=data[self.label], y_pred=y_pred, sample_weight=sample_weight, silent=silent, + return self.evaluate_predictions(y_true=data[self.label], y_pred=y_pred, sample_weight=sample_weight, + decision_threshold=decision_threshold, silent=silent, auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report) - def evaluate_predictions(self, y_true, y_pred, sample_weight=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict: + def evaluate_predictions(self, y_true, y_pred, sample_weight=None, decision_threshold=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict: """ Evaluate the provided prediction probabilities against ground truth labels. 
Evaluation is based on the `eval_metric` previously specified in init, or default metrics if none was specified. @@ -1515,6 +1637,11 @@ def evaluate_predictions(self, y_true, y_pred, sample_weight=None, silent=False, Caution: For certain types of `eval_metric` (such as 'roc_auc'), `y_pred` must be predicted-probabilities rather than predicted labels. sample_weight : :class:`pd.Series`, default = None Sample weight for each row of data. If None, uniform sample weights are used. + decision_threshold : float, default = None + The decision threshold to use when converting prediction probabilities to predictions. + This will impact the scores of metrics such as `f1` and `accuracy`. + If None, defaults to `predictor.decision_threshold`. Ignored unless `problem_type='binary'`. + Refer to the `predictor.decision_threshold` docstring for more information. silent : bool, default = False If False, performance results are printed. auxiliary_metrics: bool, default = True @@ -1528,13 +1655,17 @@ def evaluate_predictions(self, y_true, y_pred, sample_weight=None, silent=False, NOTE: Metrics scores always show in higher is better form. This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative. """ - return self._learner.evaluate_predictions(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight, silent=silent, + if decision_threshold is None: + decision_threshold = self.decision_threshold + return self._learner.evaluate_predictions(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight, + decision_threshold=decision_threshold, silent=silent, auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report) def leaderboard(self, - data=None, + data: str | TabularDataset | pd.DataFrame | None = None, extra_info: bool = False, - extra_metrics: list = None, + extra_metrics: list | None = None, + decision_threshold: float | None = None, only_pareto_frontier: bool = False, skip_score: bool = False, silent: bool = False) -> pd.DataFrame: @@ -1645,6 +1776,14 @@ def leaderboard(self, NOTE: Metrics scores always show in higher is better form. This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative. This is necessary to avoid the user needing to know the metric to understand if higher is better when looking at leaderboard. + decision_threshold : float, default = None + The decision threshold to use when converting prediction probabilities to predictions. + This will impact the scores of metrics such as `f1` and `accuracy`. + If None, defaults to `predictor.decision_threshold`. Ignored unless `problem_type='binary'`. + Refer to the `predictor.decision_threshold` docstring for more information. + NOTE: `score_val` will not be impacted by this value in v0.8. + `score_val` will always show the validation scores achieved with a decision threshold of `0.5`. + Only test scores will be properly updated. only_pareto_frontier : bool, default = False If `True`, only return model information of models in the Pareto frontier of the accuracy/latency trade-off (models which achieve the highest score within their end-to-end inference time). At minimum this will include the model with the highest score and the model with the lowest inference time. 
@@ -1663,7 +1802,9 @@ def leaderboard(self, """ self._assert_is_fit('leaderboard') data = self._get_dataset(data, allow_nan=True) - return self._learner.leaderboard(X=data, extra_info=extra_info, extra_metrics=extra_metrics, + if decision_threshold is None: + decision_threshold = self.decision_threshold + return self._learner.leaderboard(X=data, extra_info=extra_info, extra_metrics=extra_metrics, decision_threshold=decision_threshold, only_pareto_frontier=only_pareto_frontier, skip_score=skip_score, silent=silent) def predict_proba_multi(self, @@ -1737,7 +1878,9 @@ def predict_multi(self, models: List[str] = None, as_pandas: bool = True, transform_features: bool = True, - inverse_transform: bool = True) -> dict: + inverse_transform: bool = True, + *, + decision_threshold: float = None) -> dict: """ Returns a dictionary of predictions where the key is the model name and the value is the model's prediction probabilities on the data. @@ -1773,18 +1916,29 @@ def predict_multi(self, inverse_transform : bool, default = True If True, will return predictions in the original format. If False (advanced), will return predictions in AutoGluon's internal format. + decision_threshold : float, default = None + The decision threshold used to convert prediction probabilities to predictions. + Only relevant for binary classification, otherwise ignored. + If None, defaults to `0.5`. + Valid values are in the range [0.0, 1.0] + You can obtain an optimized `decision_threshold` by first calling `predictor.calibrate_decision_threshold()`. + Useful to set for metrics such as `balanced_accuracy` and `f1` as `0.5` is often not an optimal threshold. + Predictions are calculated via the following logic on the positive class: `1 if pred > decision_threshold else 0` Returns ------- Dictionary with model names as keys and model predictions as values. 
""" self._assert_is_fit('predict_multi') + if decision_threshold is None: + decision_threshold = self.decision_threshold data = self._get_dataset(data, allow_nan=True) return self._learner.predict_multi(X=data, models=models, as_pandas=as_pandas, transform_features=transform_features, - inverse_transform=inverse_transform) + inverse_transform=inverse_transform, + decision_threshold=decision_threshold) def fit_summary(self, verbosity=3, show_plot=False): """ @@ -2018,17 +2172,7 @@ def transform_labels(self, labels, inverse=False, proba=False): """ self._assert_is_fit('transform_labels') - if inverse: - if proba: - labels_transformed = self._learner.label_cleaner.inverse_transform_proba(y=labels, as_pandas=True) - else: - labels_transformed = self._learner.label_cleaner.inverse_transform(y=labels) - else: - if proba: - labels_transformed = self._learner.label_cleaner.transform_proba(y=labels, as_pandas=True) - else: - labels_transformed = self._learner.label_cleaner.transform(y=labels) - return labels_transformed + return self._learner.transform_labels(y=labels, inverse=inverse, proba=proba) def feature_importance(self, data=None, model=None, features=None, feature_stage='original', subsample_size=5000, time_limit=None, num_shuffle_sets=None, include_confidence_band=True, confidence_level=0.99, @@ -2507,7 +2651,77 @@ def fit_weighted_ensemble(self, base_models: list = None, name_suffix='Best', ex return models - def get_oof_pred(self, model: str = None, transformed=False, train_data=None, internal_oof=False, can_infer=None) -> pd.Series: + def calibrate_decision_threshold(self, + data: str | TabularDataset | pd.DataFrame | None = None, + metric: str | Scorer | None = None, + model: str = 'best', + decision_thresholds: int | List[float] = 50, + verbose: bool = True) -> float: + """ + Calibrate the decision threshold in binary classification to optimize a given metric. + You can pass the output of this method as input to `predictor.set_decision_threshold` to update the predictor. + Will raise an AssertionError if `predictor.problem_type != 'binary'`. + + Note that while calibrating the decision threshold can help to improve a given metric, + other metrics may end up having worse scores. + For example, calibrating on `balanced_accuracy` will often harm `accuracy`. + Users should keep this in mind while leveraging decision threshold calibration. + + Parameters + ---------- + data : Union[str, pd.DataFrame], default = None + The data to use for calibration. Must contain the label column. + We recommend to keep this value as None unless you are an advanced user and understand the implications. + If None, will use internal data such as the holdout validation data or out-of-fold predictions. + metric : autogluon.core.metrics.Scorer or str, default = None + The metric to optimize during calibration. + If None, uses `predictor.eval_metric`. + model : str, default = 'best' + The model to use prediction probabilities of when calibrating the threshold. + If 'best', will use `predictor.get_model_best()`. + decision_thresholds : Union[int, List[float]], default = 50 + The number of decision thresholds on either side of `0.5` to search. + The default of 50 will result in 101 searched thresholds: [0.00, 0.01, 0.02, ..., 0.49, 0.50, 0.51, ..., 0.98, 0.99, 1.00] + Alternatively, a list of decision thresholds can be passed and only the thresholds in the list will be searched. + verbose : bool, default = True + If True, will log information about the calibration process. 
+ + Returns + ------- + Decision Threshold: A float between 0 and 1 defining the decision boundary for predictions that + maximizes the `metric` score on the `data` for the `model`. + """ + # TODO: v0.8 + # add tutorial section + # + # TODO: v0.9 + # Calculate optimal threshold for each model separately when deciding best model + # sampling/time limit + # update validation scores of models based on threshold + # speed up the logic / search for optimal threshold more efficiently + # make threshold calibration part of internal optimization, such as during fit_weighted_ensemble. + # precision has strange edge-cases where it flips from 1.0 to 0.0 score due to becoming undefined + # consider warning users who pass this metric, + # or edit this metric so they do not flip value when undefined. + # UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 due to no predicted samples. + # Use `zero_division` parameter to control this behavior. + + self._assert_is_fit('calibrate_decision_threshold') + assert self.problem_type == BINARY, f'calibrate_decision_threshold is only available for `problem_type="{BINARY}"`' + data = self._get_dataset(data, allow_nan=True) + + if metric is None: + metric = self.eval_metric + if model == 'best': + model = self.get_model_best() + + return self._learner.calibrate_decision_threshold(data=data, + metric=metric, + model=model, + decision_thresholds=decision_thresholds, + verbose=verbose) + + def get_oof_pred(self, model: str = None, transformed=False, train_data=None, internal_oof=False, decision_threshold=None, can_infer=None) -> pd.Series: """ Note: This is advanced functionality not intended for normal usage. @@ -2525,6 +2739,8 @@ def get_oof_pred(self, model: str = None, transformed=False, train_data=None, in Refer to `get_oof_pred_proba()` documentation. internal_oof : bool, default = False Refer to `get_oof_pred_proba()` documentation. + decision_threshold : float, default = None + Refer to `predict_multi` documentation. can_infer : bool, default = None Refer to `get_oof_pred_proba()` documentation. @@ -2533,13 +2749,17 @@ def get_oof_pred(self, model: str = None, transformed=False, train_data=None, in :class:`pd.Series` object of the out-of-fold training predictions of the model. """ self._assert_is_fit('get_oof_pred') + if decision_threshold is None: + decision_threshold = self.decision_threshold y_pred_proba_oof = self.get_oof_pred_proba(model=model, transformed=transformed, as_multiclass=True, train_data=train_data, internal_oof=internal_oof, can_infer=can_infer) - y_pred_oof = get_pred_from_proba_df(y_pred_proba_oof, problem_type=self.problem_type) + y_pred_oof = get_pred_from_proba_df(y_pred_proba_oof, + problem_type=self.problem_type, + decision_threshold=decision_threshold) if transformed: return self._learner.label_cleaner.to_transformed_dtype(y_pred_oof) return y_pred_oof
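A sketch of how the new core-level entry point is called directly; the labels and positive-class probabilities below mirror the unit test that follows:

```python
import numpy as np

from autogluon.core.calibrate import calibrate_decision_threshold
from autogluon.core.metrics import f1

# Sample labels and positive-class probabilities copied from the unit test below.
y = np.array([1, 0, 1, 1, 1, 0])
y_pred_proba = np.array([0.0, 0.24, 0.25, 0.25, 0.5, 1.0])

# Brute-force search over thresholds on either side of 0.5 (50 per side by default).
threshold = calibrate_decision_threshold(y=y, y_pred_proba=y_pred_proba, metric=f1)
assert threshold == 0.24  # per test_calibrate_decision_threshold below
```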
diff --git a/core/tests/unittests/calibrate/test_decision_threshold.py b/core/tests/unittests/calibrate/test_decision_threshold.py new file mode 100644 --- /dev/null +++ b/core/tests/unittests/calibrate/test_decision_threshold.py @@ -0,0 +1,114 @@ +import numpy as np +import pytest + +from autogluon.core.calibrate import calibrate_decision_threshold +from autogluon.core.metrics import balanced_accuracy, f1, roc_auc + + +def _get_sample_data(): + y = np.array([ + 1, + 0, + 1, + 1, + 1, + 0, + ]) + y_pred_proba = np.array([ + 0.0, + 0.24, + 0.25, + 0.25, + 0.5, + 1.0, + ]) + return y, y_pred_proba + + +def test_calibrate_decision_threshold(): + y, y_pred_proba = _get_sample_data() + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=f1, + ) + assert decision_threshold == 0.24 + + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=balanced_accuracy, + ) + assert decision_threshold == 0.24 + + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=balanced_accuracy, + decision_thresholds=10, + ) + assert decision_threshold == 1.0 + + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=balanced_accuracy, + decision_thresholds=[0.88], + ) + assert decision_threshold == 0.88 + + +def test_calibrate_decision_threshold_select_closer_to_0_5(): + """Test that calibration will choose the threshold closer to 0.5 in the case of a tie""" + y, y_pred_proba = _get_sample_data() + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=balanced_accuracy, + decision_thresholds=[0.5, 0.244, 0.247], + ) + assert decision_threshold == 0.247 + + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=balanced_accuracy, + decision_thresholds=[0.5, 0.247, 0.244], + ) + assert decision_threshold == 0.247 + + +def test_calibrate_decision_threshold_proba_metric_0_5(): + """Test that non-pred metrics will always return 0.5""" + y, y_pred_proba = _get_sample_data() + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=roc_auc, + ) + assert decision_threshold == 0.5 + decision_threshold = calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=roc_auc, + decision_thresholds=[0.1], + ) + assert decision_threshold == 0.5 + + +def test_calibrate_decision_threshold_out_of_bounds(): + y, y_pred_proba = _get_sample_data() + with pytest.raises(ValueError): + calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=balanced_accuracy, + decision_thresholds=[1.0, 0.5, 2.0], + ) + with pytest.raises(ValueError): + calibrate_decision_threshold( + y=y, + y_pred_proba=y_pred_proba, + metric=balanced_accuracy, + decision_thresholds=[-0.01, 0.5], + ) diff --git a/tabular/tests/unittests/models/test_lightgbm.py b/tabular/tests/unittests/models/test_lightgbm.py --- a/tabular/tests/unittests/models/test_lightgbm.py +++ b/tabular/tests/unittests/models/test_lightgbm.py @@ -62,3 +62,100 @@ def test_lightgbm_quantile(fit_helper): dataset_name = 'ames' init_args = dict(problem_type='quantile', quantile_levels=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args, init_args=init_args) + + +def test_lightgbm_binary_with_calibrate_decision_threshold(fit_helper): + """Tests that calibrate_decision_threshold works and does not make the 
validation score worse on the given metric""" + fit_args = dict( + hyperparameters={LGBModel: {}}, + ) + dataset_name = 'adult' + + predictor = fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args, delete_directory=False, refit_full=False) + + for metric in [None, 'f1', 'balanced_accuracy', 'mcc', 'recall', 'precision']: + decision_threshold = predictor.calibrate_decision_threshold(metric=metric) + if metric is None: + metric = predictor.eval_metric.name + assert decision_threshold >= 0 + assert decision_threshold <= 1 + + X_val, y_val = predictor.load_data_internal(data='val', return_X=True, return_y=True) + y_val = predictor.transform_labels(labels=y_val, inverse=True) + + y_pred_val = predictor.predict(data=X_val, transform_features=False) + y_pred_val_w_decision_threshold = predictor.predict(data=X_val, decision_threshold=decision_threshold, transform_features=False) + y_pred_multi_val_w_decision_threshold = predictor.predict_multi(data=X_val, decision_threshold=decision_threshold, transform_features=False) + y_pred_multi_val_w_decision_threshold_cache = predictor.predict_multi(decision_threshold=decision_threshold) + + y_pred_proba_val = predictor.predict_proba(data=X_val, transform_features=False) + y_pred_val_w_decision_threshold_from_proba = predictor.get_pred_from_proba(y_pred_proba=y_pred_proba_val, decision_threshold=decision_threshold) + + assert y_pred_val_w_decision_threshold.equals(y_pred_multi_val_w_decision_threshold[predictor.get_model_best()]) + assert y_pred_val_w_decision_threshold.equals(y_pred_multi_val_w_decision_threshold_cache[predictor.get_model_best()]) + assert y_pred_val_w_decision_threshold.equals(y_pred_val_w_decision_threshold_from_proba) + + result = predictor.evaluate_predictions(y_true=y_val, y_pred=y_pred_val) + result_calibrated = predictor.evaluate_predictions(y_true=y_val, y_pred=y_pred_val_w_decision_threshold) + + # Ensure validation score never becomes worse on the calibrated metric + assert result[metric] <= result_calibrated[metric] + if metric in ['recall']: + # recall should always be able to achieve a perfect validation score + assert result_calibrated[metric] == 1.0 + + assert predictor.calibrate_decision_threshold(metric='roc_auc') == 0.5 + + +def test_lightgbm_binary_with_calibrate_decision_threshold_bagged_refit(fit_helper, dataset_loader_helper): + """Tests that calibrate_decision_threshold works and does not make the validation score worse on the given metric""" + fit_args = dict( + hyperparameters={LGBModel: {}}, + num_bag_folds=2, + calibrate_decision_threshold=True, + ) + init_args = dict( + eval_metric='f1' + ) + dataset_name = 'adult' + + directory_prefix = './datasets/' + train_data, test_data, dataset_info = dataset_loader_helper.load_dataset(name=dataset_name, + directory_prefix=directory_prefix) + label = dataset_info['label'] + predictor = fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, init_args=init_args, fit_args=fit_args, + delete_directory=False, refit_full=True) + + assert predictor._decision_threshold is not None + assert predictor.decision_threshold == predictor._decision_threshold + optimal_decision_threshold = predictor.calibrate_decision_threshold() + assert optimal_decision_threshold == predictor.decision_threshold + og_threshold = predictor.decision_threshold + + y_pred_test = predictor.predict(test_data) + + scores_predictions = predictor.evaluate_predictions(y_true=test_data[label], y_pred=y_pred_test) + scores = predictor.evaluate(test_data) + scores_05 = 
predictor.evaluate(test_data, decision_threshold=0.5) + + for k in scores_predictions: + assert scores[k] == scores_predictions[k] + assert scores['f1'] > scores_05['f1'] # Calibration should help f1 + assert scores['accuracy'] < scores_05['accuracy'] # Calibration should harm accuracy + + predictor.set_decision_threshold(0.5) + assert predictor.decision_threshold == 0.5 + assert predictor._decision_threshold == 0.5 + scores_05_native = predictor.evaluate(test_data) + + for k in scores_05: + assert scores_05[k] == scores_05_native[k] + + leaderboard_05 = predictor.leaderboard(test_data) + lb_score_05 = leaderboard_05[leaderboard_05['model'] == predictor.get_model_best()].iloc[0]['score_test'] + assert lb_score_05 == scores_05['f1'] + + predictor.set_decision_threshold(og_threshold) + leaderboard = predictor.leaderboard(test_data) + lb_score = leaderboard[leaderboard['model'] == predictor.get_model_best()].iloc[0]['score_test'] + assert lb_score == scores['f1']
threshold for classifier

Is there a way to get the threshold for a trained classifier, i.e. to convert predict_proba returned values to binary (or multi-class) predictions? I can't find anything in the docs or in the issues for this. I'd rather not fork and modify the code if possible.
Currently it is always 0.5 for binary or the max for multiclass. We don't adjust thresholds when making predictions. If you would like to use a different threshold, please use `predict_proba()` and then apply your own threshold to decide how to map these predicted class-probabilities into predicted class-labels.

Yes, I was already doing that. I was just curious whether that gets adjusted internally as part of optimization. Is it not done because you've seen it cause overfitting? Or is there a theoretical ERM reason not to? I assume you're not doing it because you don't know the cost of misclassification for each category.

@rxjx I think we just haven't adjusted the threshold internally, simply due to lack of bandwidth :) If you'd like to help contribute this, that would be awesome! The steps needed would be something like:

1) Add an optional `class_threshold` argument to the `predict()` function. Make sure `class_threshold` can also take in an array or pd.Series of thresholds for each class in multiclass classification (and include a check to ensure these thresholds are actually realizable for multiclass, such that one of the classes will always be predicted for every row).

2) Automatically set this `class_threshold` argument based on `eval_metric`, for only those evaluation metrics that depend on `predict()` rather than `predict_proba()`. For example: `accuracy`, `balanced_accuracy`, `f1_score`, etc. See the possible metrics in:
https://github.com/awslabs/autogluon/blob/master/core/src/autogluon/core/metrics/__init__.py
https://github.com/awslabs/autogluon/blob/master/core/src/autogluon/core/metrics/classification_metrics.py

Note that for many metrics, a reasonable `class_threshold` can simply be hard-coded (e.g. 0.5 for binary classification accuracy, the class frequency for balanced_accuracy, etc.), so I'd start with that before trying to iteratively optimize the threshold based on computed validation scores.

Thanks for the response. I can definitely take a look at doing this, although not promptly, since work and another side project claim my time. It might also be nice to return confusion matrices for some thresholds, or perhaps graphical representations.

Awesome to hear you're interested 👍 I agree graphical representations would be nice to show; note that we already offer confusion-matrix functionality inside our `evaluate_predictions()` function. You can also email us at: [email protected] for private questions or to receive an invite to our Slack channel for contributors.

I think the multiclass case gets complicated fast. Let's restrict it to binary for the first pass?

Yes, I think an initial PR can just add this functionality for binary classification only, since it's more straightforward and the most common use case. For other problem types you could, for now, simply print an error message if the user has specified the optional threshold argument.

Hello, has this functionality been implemented? It would be very useful.

Sorry, I really haven't made much progress at all due to work/life constraints. I'll probably be able to jump on this in a month or so.

Hello, any updates on this functionality? Thanks in advance!

Sorry, I haven't found time to work on it at all. Work has been overloaded and the advent of LLMs has added to the load. Realistically, it doesn't look like I can get to it anytime soon.
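For readers landing on this thread before the feature ships, the workaround suggested above fits in a few lines. A minimal sketch, assuming a fitted binary `TabularPredictor` named `predictor` and a feature DataFrame `test_data` (both names are illustrative; the 0.3 cutoff is an arbitrary example):

```
# Apply a custom decision threshold to predicted probabilities instead of predict().
threshold = 0.3  # user-chosen, e.g. to trade precision for recall

y_pred_proba = predictor.predict_proba(test_data)
# predict_proba returns one column per class; take the positive class.
positive_class = predictor.class_labels[-1]
negative_class = predictor.class_labels[0]
y_pred = (y_pred_proba[positive_class] >= threshold).map(
    {True: positive_class, False: negative_class}
)
```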
2023-06-10T01:41:08Z
[]
[]
autogluon/autogluon
3,575
autogluon__autogluon-3575
[ "3564" ]
2d8bc463000191e3b94839831df499857f5a688e
diff --git a/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py b/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py
--- a/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py
+++ b/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py
@@ -19,8 +19,9 @@
 from autogluon.common.loaders import load_pkl
 from autogluon.common.utils.log_utils import set_logger_verbosity
-from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TimeSeriesDataFrame
+from autogluon.timeseries.dataset.ts_dataframe import ITEMID, TIMESTAMP, TimeSeriesDataFrame
 from autogluon.timeseries.models.abstract import AbstractTimeSeriesModel
+from autogluon.timeseries.utils.datetime import norm_freq_str
 from autogluon.timeseries.utils.forecast import get_forecast_horizon_index_ts_dataframe
 from autogluon.timeseries.utils.warning_filters import disable_root_logger, warning_filter
@@ -35,9 +36,7 @@
 class SimpleGluonTSDataset(GluonTSDataset):
-    """A simple GluonTS dataset that wraps a TimeSeriesDataFrame and implements the
-    GluonTS Dataset protocol via lazy iterations.
-    """
+    """Wrapper for TimeSeriesDataFrame that is compatible with the GluonTS Dataset API."""

     def __init__(
         self,
@@ -47,65 +46,78 @@ def __init__(
         feat_static_cat: Optional[pd.DataFrame] = None,
         feat_static_real: Optional[pd.DataFrame] = None,
         feat_dynamic_real: Optional[pd.DataFrame] = None,
         past_feat_dynamic_real: Optional[pd.DataFrame] = None,
-        float_dtype: Type = np.float64,
-        int_dtype: Type = np.int64,
+        includes_future: bool = False,
+        prediction_length: int = None,
     ):
         assert target_df is not None
         assert target_df.freq, "Initializing GluonTS data sets without freq is not allowed"
         # Convert TimeSeriesDataFrame to pd.Series for faster processing
-        self.target_series = target_df[target_column]
-        self.item_ids = target_df.item_ids
-        self.freq_ = target_df.freq
-        self.feat_static_cat = feat_static_cat
-        self.feat_static_real = feat_static_real
-        self.feat_dynamic_real = feat_dynamic_real
-        self.past_feat_dynamic_real = past_feat_dynamic_real
+        self.target_array = self._to_array(target_df[target_column], dtype=np.float32)
+        self.feat_static_cat = self._to_array(feat_static_cat, dtype=np.int64)
+        self.feat_static_real = self._to_array(feat_static_real, dtype=np.float32)
+        self.feat_dynamic_real = self._to_array(feat_dynamic_real, dtype=np.float32)
+        self.past_feat_dynamic_real = self._to_array(past_feat_dynamic_real, dtype=np.float32)
+        self.freq = self._to_gluonts_freq(target_df.freq)
+
+        # Necessary to compute indptr for known_covariates at prediction time
+        self.includes_future = includes_future
+        self.prediction_length = prediction_length
+
+        # Replace inefficient groupby ITEMID with indptr that stores start:end of each time series
+        item_id_index = target_df.index.get_level_values(ITEMID)
+        indices_sizes = item_id_index.value_counts(sort=False)
+        self.item_ids = indices_sizes.index  # shape [num_items]
+        cum_sizes = indices_sizes.values.cumsum()
+        self.indptr = np.append(0, cum_sizes).astype(np.int32)
+        self.timestamps = target_df.index.get_level_values(TIMESTAMP)  # shape [len(target_df)]

-        self.int_dtype = int_dtype
-        self.float_dtype = float_dtype
+    @staticmethod
+    def _to_array(df: Optional[pd.DataFrame], dtype: np.dtype) -> Optional[np.ndarray]:
+        if df is None:
+            return None
+        else:
+            return df.to_numpy(dtype=dtype)

-    @property
-    def freq(self):
+    @staticmethod
+    def _to_gluonts_freq(freq: str) -> str:
         # FIXME: GluonTS expects a frequency string, but only supports a limited number of such strings
         # for feature generation. If the frequency string doesn't match or is not provided, it raises an exception.
         # Here we bypass this by issuing a default "yearly" frequency, tricking it into not producing
         # any lags or features.
-        pd_offset = to_offset(self.freq_)
+        pd_offset = to_offset(freq)
         # normalize freq str to handle peculiarities such as W-SUN
-        offset_base_alias = pd_offset.name.split("-")[0]
-
-        return "A" if offset_base_alias is None or offset_base_alias not in GLUONTS_SUPPORTED_OFFSETS else self.freq_
+        offset_base_alias = norm_freq_str(pd_offset)
+        if offset_base_alias not in GLUONTS_SUPPORTED_OFFSETS:
+            return "A"
+        else:
+            return f"{pd_offset.n}{offset_base_alias}"

     def __len__(self):
-        return len(self.item_ids)  # noqa
+        return len(self.indptr) - 1  # noqa

     def __iter__(self) -> Iterator[Dict[str, Any]]:
-        for item_id in self.item_ids:  # noqa
-            ts = self.target_series.loc[item_id]
-            time_series = {
-                FieldName.ITEM_ID: item_id,
-                FieldName.TARGET: ts.to_numpy(dtype=self.float_dtype).ravel(),
-                FieldName.START: pd.Period(ts.index[0], freq=self.freq),
+        for j in range(len(self.indptr) - 1):
+            start_idx = self.indptr[j]
+            end_idx = self.indptr[j + 1]
+            # GluonTS expects item_id to be a string
+            ts = {
+                FieldName.ITEM_ID: str(self.item_ids[j]),
+                FieldName.START: pd.Period(self.timestamps[j], freq=self.freq),
+                FieldName.TARGET: self.target_array[start_idx:end_idx],
             }
             if self.feat_static_cat is not None:
-                time_series[FieldName.FEAT_STATIC_CAT] = self.feat_static_cat.loc[item_id].to_numpy(
-                    dtype=self.int_dtype
-                )
+                ts[FieldName.FEAT_STATIC_CAT] = self.feat_static_cat[j]
             if self.feat_static_real is not None:
-                time_series[FieldName.FEAT_STATIC_REAL] = self.feat_static_real.loc[item_id].to_numpy(
-                    dtype=self.float_dtype
-                )
-            if self.feat_dynamic_real is not None:
-                time_series[FieldName.FEAT_DYNAMIC_REAL] = (
-                    self.feat_dynamic_real.loc[item_id].to_numpy(dtype=self.float_dtype).T
-                )
+                ts[FieldName.FEAT_STATIC_REAL] = self.feat_static_real[j]
             if self.past_feat_dynamic_real is not None:
-                time_series[FieldName.PAST_FEAT_DYNAMIC_REAL] = (
-                    self.past_feat_dynamic_real.loc[item_id].to_numpy(dtype=self.float_dtype).T
-                )
-
-            yield time_series
+                ts[FieldName.PAST_FEAT_DYNAMIC_REAL] = self.past_feat_dynamic_real[start_idx:end_idx].T
+            if self.feat_dynamic_real is not None:
+                if self.includes_future:
+                    start_idx = start_idx + j * self.prediction_length
+                    end_idx = end_idx + (j + 1) * self.prediction_length
+                ts[FieldName.FEAT_DYNAMIC_REAL] = self.feat_dynamic_real[start_idx:end_idx].T
+            yield ts


 class AbstractGluonTSModel(AbstractTimeSeriesModel):
@@ -133,9 +145,6 @@ class AbstractGluonTSModel(AbstractTimeSeriesModel):
     """

     gluonts_model_path = "gluon_ts"
-    # datatype of floating point and integers passed internally to GluonTS
-    float_dtype: Type = np.float32
-    int_dtype: Type = np.int64
     # default number of samples for prediction
     default_num_samples: int = 1000
     supports_known_covariates: bool = False
@@ -168,6 +177,12 @@ def __init__(
         self.num_past_feat_dynamic_real = 0
         self.feat_static_cat_cardinality: List[int] = []

+        if 0.5 not in self.quantile_levels:
+            self.must_drop_median = True
+            self.quantile_levels = sorted(set([0.5] + self.quantile_levels))
+        else:
+            self.must_drop_median = False
+
     def save(self, path: str = None, verbose: bool = True) -> str:
         # we flush callbacks instance variable if it has been set. it can keep weak references which breaks training
         self.callbacks = []
@@ -233,10 +248,12 @@ def default_context_length(self) -> int:

     def _get_model_params(self) -> dict:
         """Gets params that are passed to the inner model."""
-        args = super()._get_model_params().copy()
-        args.setdefault("batch_size", 64)
-        args.setdefault("context_length", self.default_context_length)
-        args.update(
+        init_args = super()._get_model_params().copy()
+        init_args.setdefault("batch_size", 64)
+        init_args.setdefault("context_length", self.default_context_length)
+        init_args.setdefault("predict_batch_size", 500)
+        init_args.setdefault("early_stopping_patience", 20)
+        init_args.update(
             dict(
                 freq=self.freq,
                 prediction_length=self.prediction_length,
@@ -244,16 +261,15 @@ def _get_model_params(self) -> dict:
                 callbacks=self.callbacks,
             )
         )
-        return args
+        # Support MXNet kwarg names for backwards compatibility
+        init_args.setdefault("lr", init_args.get("learning_rate", 1e-3))
+        init_args.setdefault("max_epochs", init_args.get("epochs"))
+        return init_args

     def _get_estimator_init_args(self) -> Dict[str, Any]:
         """Get GluonTS specific constructor arguments for estimator objects, an alias to
         `self._get_model_params` for better readability."""
-        init_kwargs = self._get_model_params()
-        # Map MXNet kwarg names to PyTorch Lightning kwarg names
-        init_kwargs.setdefault("lr", init_kwargs.get("learning_rate", 1e-3))
-        init_kwargs.setdefault("max_epochs", init_kwargs.get("epochs"))
-        return init_kwargs
+        return self._get_model_params()

     def _get_estimator_class(self) -> Type[GluonTSEstimator]:
         raise NotImplementedError
@@ -267,25 +283,25 @@ def _get_estimator(self) -> GluonTSEstimator:

         init_args = self._get_estimator_init_args()

-        trainer_kwargs = {}
-        epochs = init_args.get("max_epochs")
-        callbacks = init_args.get("callbacks", [])
-
-        # TODO: Provide trainer_kwargs outside the function (e.g., to specify # of GPUs)?
-        if epochs is not None:
-            trainer_kwargs.update({"max_epochs": epochs})
-        trainer_kwargs.update({"callbacks": callbacks, "enable_progress_bar": False})
-        trainer_kwargs["default_root_dir"] = self.path
+        default_trainer_kwargs = {
+            "max_epochs": init_args["max_epochs"],
+            "callbacks": init_args["callbacks"],
+            "enable_progress_bar": False,
+            "default_root_dir": self.path,
+        }

         if torch.cuda.is_available():
-            trainer_kwargs["accelerator"] = "gpu"
-            trainer_kwargs["devices"] = 1
+            default_trainer_kwargs["accelerator"] = "gpu"
+            default_trainer_kwargs["devices"] = 1
         else:
-            trainer_kwargs["accelerator"] = "cpu"
+            default_trainer_kwargs["accelerator"] = "cpu"
+
+        default_trainer_kwargs.update(init_args.pop("trainer_kwargs", {}))
+        logger.debug(f"\tTraining on device '{default_trainer_kwargs['accelerator']}'")

         return from_hyperparameters(
             self._get_estimator_class(),
-            trainer_kwargs=trainer_kwargs,
+            trainer_kwargs=default_trainer_kwargs,
             **init_args,
         )

@@ -331,8 +347,8 @@ def _to_gluonts_dataset(
                 feat_static_real=feat_static_real,
                 feat_dynamic_real=feat_dynamic_real,
                 past_feat_dynamic_real=past_feat_dynamic_real,
-                float_dtype=self.float_dtype,
-                int_dtype=self.int_dtype,
+                includes_future=known_covariates is not None,
+                prediction_length=self.prediction_length,
             )
         else:
             return None
@@ -363,9 +379,12 @@ def _fit(
         self._check_fit_params()

         # update auxiliary parameters
-        self._deferred_init_params_aux(
-            dataset=train_data, callbacks=self._get_callbacks(time_limit=time_limit), **kwargs
+        init_args = self._get_estimator_init_args()
+        callbacks = self._get_callbacks(
+            time_limit=time_limit,
+            early_stopping_patience=None if val_data is None else init_args["early_stopping_patience"],
         )
+        self._deferred_init_params_aux(dataset=train_data, callbacks=callbacks)

         estimator = self._get_estimator()
         with warning_filter(), disable_root_logger(), gluonts.core.settings.let(gluonts.env.env, use_tqdm=False):
@@ -374,17 +393,29 @@ def _fit(
                 validation_data=self._to_gluonts_dataset(val_data),
                 cache_data=True,
             )
+            # Increase batch size during prediction to speed up inference
+            if init_args["predict_batch_size"] is not None:
+                self.gts_predictor.batch_size = init_args["predict_batch_size"]

         lightning_logs_dir = Path(self.path) / "lightning_logs"
         if lightning_logs_dir.exists() and lightning_logs_dir.is_dir():
             logger.debug(f"Removing lightning_logs directory {lightning_logs_dir}")
             shutil.rmtree(lightning_logs_dir)

-    def _get_callbacks(self, time_limit: int, *args, **kwargs) -> List[Callable]:
+    def _get_callbacks(
+        self,
+        time_limit: int,
+        early_stopping_patience: Optional[int] = None,
+    ) -> List[Callable]:
         """Retrieve a list of callback objects for the GluonTS trainer"""
-        from pytorch_lightning.callbacks import Timer
+        from pytorch_lightning.callbacks import EarlyStopping, Timer

-        return [Timer(timedelta(seconds=time_limit))] if time_limit is not None else []
+        callbacks = []
+        if time_limit is not None:
+            callbacks.append(Timer(timedelta(seconds=time_limit)))
+        if early_stopping_patience is not None:
+            callbacks.append(EarlyStopping(monitor="val_loss", patience=early_stopping_patience))
+        return callbacks

     def predict(
         self,
@@ -407,10 +438,8 @@ def predict(
             raise ValueError("Invalid quantile value specified. Quantiles must be between 0 and 1 (exclusive).")

         predicted_targets = self._predict_gluonts_forecasts(data, known_covariates=known_covariates, **kwargs)
-
         df = self._gluonts_forecasts_to_data_frame(
             predicted_targets,
-            quantile_levels=quantile_levels or self.quantile_levels,
             forecast_index=get_forecast_horizon_index_ts_dataframe(data, self.prediction_length),
         )
@@ -426,72 +455,95 @@ def _predict_gluonts_forecasts(

         return list(self.gts_predictor.predict(**predictor_kwargs))

-    @staticmethod
-    def _sample_to_quantile_forecast(forecast: SampleForecast, quantile_levels: List[float]) -> QuantileForecast:
-        forecast_arrays = [forecast.mean]
-
-        quantile_keys = [str(q) for q in quantile_levels]
-        for q in quantile_keys:
-            forecast_arrays.append(forecast.quantile(q))
-
-        forecast_init_args = dict(
-            forecast_arrays=np.array(forecast_arrays),
-            start_date=forecast.start_date,
-            forecast_keys=["mean"] + quantile_keys,
-            item_id=str(forecast.item_id),
-        )
-        return QuantileForecast(**forecast_init_args)
-
-    @staticmethod
-    def _distribution_to_quantile_forecast(forecast: Forecast, quantile_levels: List[float]) -> QuantileForecast:
+    def _stack_quantile_forecasts(self, forecasts: List[QuantileForecast], item_ids: pd.Index) -> pd.DataFrame:
+        # GluonTS always saves item_id as a string
+        item_id_to_forecast = {str(f.item_id): f for f in forecasts}
+        result_dfs = []
+        for item_id in item_ids:
+            forecast = item_id_to_forecast[str(item_id)]
+            result_dfs.append(pd.DataFrame(forecast.forecast_array.T, columns=forecast.forecast_keys))
+        forecast_df = pd.concat(result_dfs)
+        if "mean" not in forecast_df.columns:
+            forecast_df["mean"] = forecast_df["0.5"]
+        columns_order = ["mean"] + [str(q) for q in self.quantile_levels]
+        return forecast_df[columns_order]
+
+    def _stack_sample_forecasts(self, forecasts: List[SampleForecast], item_ids: pd.Index) -> pd.DataFrame:
+        item_id_to_forecast = {str(f.item_id): f for f in forecasts}
+        samples_per_item = []
+        for item_id in item_ids:
+            forecast = item_id_to_forecast[str(item_id)]
+            samples_per_item.append(forecast.samples.T)
+        samples = np.concatenate(samples_per_item, axis=0)
+        quantiles = np.quantile(samples, self.quantile_levels, axis=1).T
+        mean = samples.mean(axis=1, keepdims=True)
+        forecast_array = np.concatenate([mean, quantiles], axis=1)
+        return pd.DataFrame(forecast_array, columns=["mean"] + [str(q) for q in self.quantile_levels])
+
+    def _stack_distribution_forecasts(self, forecasts: List[Forecast], item_ids: pd.Index) -> pd.DataFrame:
         import torch
+        from gluonts.torch.distributions import AffineTransformed
+        from torch.distributions import Distribution

-        # Compute all quantiles in parallel instead of a for-loop
-        quantiles = torch.tensor(quantile_levels, device=forecast.distribution.mean.device).reshape(-1, 1)
-        quantile_predictions = forecast.distribution.icdf(quantiles).cpu().detach().numpy()
-        forecast_arrays = np.vstack([forecast.mean, quantile_predictions])
-        forecast_keys = ["mean"] + [str(q) for q in quantile_levels]
-
-        forecast_init_args = dict(
-            forecast_arrays=forecast_arrays,
-            start_date=forecast.start_date,
-            forecast_keys=forecast_keys,
-            item_id=str(forecast.item_id),
-        )
-        return QuantileForecast(**forecast_init_args)
+        # Sort forecasts in the same order as in the dataset
+        item_id_to_forecast = {str(f.item_id): f for f in forecasts}
+        forecasts = [item_id_to_forecast[str(item_id)] for item_id in item_ids]
+
+        def stack_distributions(distributions: List[Distribution]) -> Distribution:
+            """Stack multiple torch.Distribution objects into a single distribution"""
+            params_per_dist = []
+            for dist in distributions:
+                params = {name: getattr(dist, name) for name in dist.arg_constraints.keys()}
+                params_per_dist.append(params)
+            # Make sure that all distributions have same keys
+            assert len(set(tuple(p.keys()) for p in params_per_dist)) == 1
+
+            stacked_params = {}
+            for key in dist.arg_constraints.keys():
+                stacked_params[key] = torch.cat([p[key] for p in params_per_dist])
+            return dist.__class__(**stacked_params)
+
+        if not isinstance(forecasts[0].distribution, AffineTransformed):
+            raise AssertionError("Expected forecast.distribution to be an instance of AffineTransformed")
+
+        # We stack all forecast distribution into a single Distribution object.
+        # This dramatically speeds up the quantiles calculation.
+        stacked_base_dist = stack_distributions([f.distribution.base_dist for f in forecasts])
+
+        stacked_loc = torch.cat([f.distribution.loc for f in forecasts])
+        if stacked_loc.shape != stacked_base_dist.batch_shape:
+            stacked_loc = stacked_loc.repeat_interleave(self.prediction_length)
+
+        stacked_scale = torch.cat([f.distribution.scale for f in forecasts])
+        if stacked_scale.shape != stacked_base_dist.batch_shape:
+            stacked_scale = stacked_scale.repeat_interleave(self.prediction_length)
+
+        stacked_dist = AffineTransformed(stacked_base_dist, loc=stacked_loc, scale=stacked_scale)
+
+        mean_prediction = stacked_dist.mean.cpu().detach().numpy()
+        quantiles = torch.tensor(self.quantile_levels, device=stacked_dist.mean.device).reshape(-1, 1)
+        quantile_predictions = stacked_dist.icdf(quantiles).cpu().detach().numpy()
+        forecast_array = np.vstack([mean_prediction, quantile_predictions]).T
+        return pd.DataFrame(forecast_array, columns=["mean"] + [str(q) for q in self.quantile_levels])

     def _gluonts_forecasts_to_data_frame(
         self,
         forecasts: List[Forecast],
-        quantile_levels: List[float],
         forecast_index: pd.MultiIndex,
     ) -> TimeSeriesDataFrame:
         from gluonts.torch.model.forecast import DistributionForecast

-        # TODO: Concatenate all forecasts into a single tensor/object before converting?
-        # Especially for DistributionForecast this could result in massive speedups
+        item_ids = forecast_index.unique(level=ITEMID)
         if isinstance(forecasts[0], SampleForecast):
-            forecasts = [self._sample_to_quantile_forecast(f, quantile_levels) for f in forecasts]
+            forecast_df = self._stack_sample_forecasts(forecasts, item_ids)
+        elif isinstance(forecasts[0], QuantileForecast):
+            forecast_df = self._stack_quantile_forecasts(forecasts, item_ids)
         elif isinstance(forecasts[0], DistributionForecast):
-            forecasts = [self._distribution_to_quantile_forecast(f, quantile_levels) for f in forecasts]
+            forecast_df = self._stack_distribution_forecasts(forecasts, item_ids)
         else:
-            assert isinstance(forecasts[0], QuantileForecast), f"Unrecognized forecast type {type(forecasts[0])}"
+            raise ValueError(f"Unrecognized forecast type {type(forecasts[0])}")

-        # sanity check to ensure all quantiles are accounted for
-        assert all(str(q) in forecasts[0].forecast_keys for q in quantile_levels), (
-            "Some forecast quantiles are missing from GluonTS forecast outputs. Was"
-            " the model trained to forecast all quantiles?"
-        )
-        item_id_to_forecast = {str(f.item_id): f for f in forecasts}
-        result_dfs = []
-        for item_id in forecast_index.unique(level=ITEMID):
-            # GluonTS always saves item_id as a string
-            forecast = item_id_to_forecast[str(item_id)]
-            item_forecast_dict = {"mean": forecast.mean}
-            for quantile in quantile_levels:
-                item_forecast_dict[str(quantile)] = forecast.quantile(str(quantile))
-            result_dfs.append(pd.DataFrame(item_forecast_dict))
-
-        result = pd.concat(result_dfs)
-        result.index = forecast_index
-        return TimeSeriesDataFrame(result)
+        forecast_df.index = forecast_index
+        if self.must_drop_median:
+            forecast_df = forecast_df.drop("0.5", axis=1)
+        return TimeSeriesDataFrame(forecast_df)
diff --git a/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py b/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py
--- a/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py
+++ b/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py
@@ -64,10 +64,16 @@ class DeepARModel(AbstractGluonTSModel):
         Number of epochs the model will be trained for
     batch_size : int, default = 64
         Size of batches used during training
+    predict_batch_size : int, default = 500
+        Size of batches used during prediction.
     num_batches_per_epoch : int, default = 50
         Number of batches processed every epoch
     learning_rate : float, default = 1e-3,
         Learning rate used during training
+    trainer_kwargs : dict, optional
+        Optional keyword arguments passed to ``lightning.Trainer``.
+    early_stopping_patience : int or None, default = 20
+        Early stop training if the validation loss doesn't improve for this many epochs.
     """

     default_num_samples: int = 250
@@ -84,8 +90,8 @@ def _get_estimator_init_args(self) -> Dict[str, Any]:
         init_kwargs["num_feat_static_real"] = self.num_feat_static_real
         init_kwargs["cardinality"] = self.feat_static_cat_cardinality
         init_kwargs["num_feat_dynamic_real"] = self.num_feat_dynamic_real
-        init_kwargs["lags_seq"] = get_lags_for_frequency(self.freq)
-        init_kwargs["time_features"] = get_time_features_for_frequency(self.freq)
+        init_kwargs.setdefault("lags_seq", get_lags_for_frequency(self.freq))
+        init_kwargs.setdefault("time_features", get_time_features_for_frequency(self.freq))
         return init_kwargs

@@ -112,10 +118,16 @@ class SimpleFeedForwardModel(AbstractGluonTSModel):
         Number of epochs the model will be trained for
     batch_size : int, default = 64
         Size of batches used during training
+    predict_batch_size : int, default = 500
+        Size of batches used during prediction.
     num_batches_per_epoch : int, default = 50
         Number of batches processed every epoch
     learning_rate : float, default = 1e-3,
         Learning rate used during training
+    trainer_kwargs : dict, optional
+        Optional keyword arguments passed to ``lightning.Trainer``.
+    early_stopping_patience : int or None, default = 20
+        Early stop training if the validation loss doesn't improve for this many epochs.
     """

     def _get_estimator_class(self) -> Type[GluonTSEstimator]:
@@ -163,10 +175,16 @@ class TemporalFusionTransformerModel(AbstractGluonTSModel):
         Number of epochs the model will be trained for
     batch_size : int, default = 64
         Size of batches used during training
+    predict_batch_size : int, default = 500
+        Size of batches used during prediction.
     num_batches_per_epoch : int, default = 50
         Number of batches processed every epoch
     learning_rate : float, default = 1e-3,
         Learning rate used during training
+    trainer_kwargs : dict, optional
+        Optional keyword arguments passed to ``lightning.Trainer``.
+    early_stopping_patience : int or None, default = 20
+        Early stop training if the validation loss doesn't improve for this many epochs.
     """

     supports_known_covariates = True
@@ -191,7 +209,7 @@ def _get_estimator_init_args(self) -> Dict[str, Any]:
         init_kwargs["static_dims"] = [self.num_feat_static_real]
         if len(self.feat_static_cat_cardinality):
             init_kwargs["static_cardinalities"] = self.feat_static_cat_cardinality
-        init_kwargs["time_features"] = get_time_features_for_frequency(self.freq)
+        init_kwargs.setdefault("time_features", get_time_features_for_frequency(self.freq))
         return init_kwargs

@@ -221,10 +239,16 @@ class DLinearModel(AbstractGluonTSModel):
         Number of epochs the model will be trained for
     batch_size : int, default = 64
         Size of batches used during training
+    predict_batch_size : int, default = 500
+        Size of batches used during prediction.
     num_batches_per_epoch : int, default = 50
         Number of batches processed every epoch
     learning_rate : float, default = 1e-3,
         Learning rate used during training
+    trainer_kwargs : dict, optional
+        Optional keyword arguments passed to ``lightning.Trainer``.
+    early_stopping_patience : int or None, default = 20
+        Early stop training if the validation loss doesn't improve for this many epochs.
     weight_decay : float, default = 1e-8
         Weight decay regularization parameter.
     """
diff --git a/timeseries/tests/unittests/models/test_gluonts.py b/timeseries/tests/unittests/models/test_gluonts.py
--- a/timeseries/tests/unittests/models/test_gluonts.py
+++ b/timeseries/tests/unittests/models/test_gluonts.py
@@ -105,7 +105,7 @@ def df_with_covariates():
 @pytest.mark.parametrize("model_class", MODELS_WITH_STATIC_FEATURES)
 def test_when_static_features_present_then_they_are_passed_to_dataset(model_class, df_with_static):
     df, metadata = df_with_static
-    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata)
+    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata, freq=df.freq)
     with mock.patch(
         "autogluon.timeseries.models.gluonts.abstract_gluonts.SimpleGluonTSDataset.__init__"
     ) as patch_dataset:
@@ -124,7 +124,7 @@ def test_when_static_features_present_then_they_are_passed_to_dataset(model_clas
 @pytest.mark.parametrize("model_class", MODELS_WITH_STATIC_FEATURES)
 def test_given_fit_with_static_features_when_predicting_then_static_features_are_used(model_class, df_with_static):
     df, metadata = df_with_static
-    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata)
+    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata, freq=df.freq)
     model.fit(train_data=df)
     predictor_method = "gluonts.torch.model.predictor.PyTorchPredictor.predict"
     with mock.patch(predictor_method) as mock_predict:
@@ -142,7 +142,7 @@ def test_given_fit_with_static_features_when_predicting_then_static_features_are
 @pytest.mark.parametrize("model_class", MODELS_WITH_STATIC_FEATURES)
 def test_when_static_features_present_then_model_attributes_set_correctly(model_class, df_with_static):
     df, metadata = df_with_static
-    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata)
+    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata, freq=df.freq)
     model.fit(train_data=df)
     assert model.num_feat_static_cat > 0
     assert model.num_feat_static_real > 0
@@ -153,7 +153,9 @@ def test_when_static_features_present_then_model_attributes_set_correctly(model_
 @pytest.mark.parametrize("model_class", MODELS_WITH_STATIC_FEATURES)
 def test_when_disable_static_features_set_to_true_then_static_features_are_not_used(model_class, df_with_static):
     df, metadata = df_with_static
-    model = model_class(hyperparameters={**DUMMY_HYPERPARAMETERS, "disable_static_features": True}, metadata=metadata)
+    model = model_class(
+        hyperparameters={**DUMMY_HYPERPARAMETERS, "disable_static_features": True}, metadata=metadata, freq=df.freq
+    )
     with mock.patch(
         "autogluon.timeseries.models.gluonts.abstract_gluonts.SimpleGluonTSDataset.__init__"
     ) as patch_dataset:
@@ -172,7 +174,7 @@ def test_when_disable_static_features_set_to_true_then_static_features_are_not_u
 @pytest.mark.parametrize("model_class", MODELS_WITH_KNOWN_COVARIATES)
 def test_when_known_covariates_present_then_they_are_passed_to_dataset(model_class, df_with_covariates):
     df, metadata = df_with_covariates
-    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata)
+    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata, freq=df.freq)
     with mock.patch(
         "autogluon.timeseries.models.gluonts.abstract_gluonts.SimpleGluonTSDataset.__init__"
     ) as patch_dataset:
@@ -189,15 +191,31 @@ def test_when_known_covariates_present_then_they_are_passed_to_dataset(model_cla
 @pytest.mark.parametrize("model_class", MODELS_WITH_KNOWN_COVARIATES)
 def test_when_known_covariates_present_then_model_attributes_set_correctly(model_class, df_with_covariates):
     df, metadata = df_with_covariates
-    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata)
+    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata, freq=df.freq)
     model.fit(train_data=df)
     assert model.num_feat_dynamic_real > 0


[email protected]("model_class", MODELS_WITH_KNOWN_COVARIATES)
+def test_when_known_covariates_present_for_predict_then_covariates_have_correct_shape(model_class, df_with_covariates):
+    df, metadata = df_with_covariates
+    prediction_length = 5
+    past_data, known_covariates = df.get_model_inputs_for_scoring(prediction_length, metadata.known_covariates_real)
+    model = model_class(
+        hyperparameters=DUMMY_HYPERPARAMETERS, metadata=metadata, freq=df.freq, prediction_length=prediction_length
+    )
+    model.fit(train_data=past_data)
+    for ts in model._to_gluonts_dataset(past_data, known_covariates=known_covariates):
+        expected_length = len(ts["target"]) + prediction_length
+        assert ts["feat_dynamic_real"].shape == (len(metadata.known_covariates_real), expected_length)
+
+
 @pytest.mark.parametrize("model_class", MODELS_WITH_KNOWN_COVARIATES)
 def test_when_disable_known_covariates_set_to_true_then_known_covariates_are_not_used(model_class, df_with_covariates):
     df, metadata = df_with_covariates
-    model = model_class(hyperparameters={**DUMMY_HYPERPARAMETERS, "disable_known_covariates": True}, metadata=metadata)
+    model = model_class(
+        hyperparameters={**DUMMY_HYPERPARAMETERS, "disable_known_covariates": True}, metadata=metadata, freq=df.freq
+    )
     with mock.patch(
         "autogluon.timeseries.models.gluonts.abstract_gluonts.SimpleGluonTSDataset.__init__"
     ) as patch_dataset:
@@ -223,6 +241,60 @@ def test_when_static_and_dynamic_covariates_present_then_model_trains_normally(m
     gen = TimeSeriesFeatureGenerator(target="target", known_covariates_names=known_covariates_names)
     df = gen.fit_transform(dataframe_with_static_and_covariates)

-    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=gen.covariate_metadata)
+    model = model_class(hyperparameters=DUMMY_HYPERPARAMETERS, metadata=gen.covariate_metadata, freq=df.freq)
     model.fit(train_data=df)
     model.score_and_cache_oof(df)
+
+
[email protected]("predict_batch_size", [30, 200])
+def test_given_custom_predict_batch_size_then_predictor_uses_correct_batch_size(predict_batch_size):
+    model = PatchTSTModel(hyperparameters={"predict_batch_size": predict_batch_size, **DUMMY_HYPERPARAMETERS})
+    model.fit(train_data=DUMMY_TS_DATAFRAME)
+    assert model.gts_predictor.batch_size == predict_batch_size
+
+
+def catch_trainer_kwargs(model):
+    with mock.patch("pytorch_lightning.Trainer") as mock_trainer:
+        try:
+            model.fit(train_data=DUMMY_TS_DATAFRAME, val_data=DUMMY_TS_DATAFRAME)
+        except IsADirectoryError:
+            # Training fails because Trainer is a mock object
+            pass
+    return mock_trainer.call_args[1]
+
+
+def test_when_custom_callbacks_passed_via_trainer_kwargs_then_trainer_receives_them():
+    from pytorch_lightning.callbacks import RichModelSummary
+
+    callback = RichModelSummary()
+    model = DLinearModel(hyperparameters={"trainer_kwargs": {"callbacks": [callback]}, **DUMMY_HYPERPARAMETERS})
+    received_trainer_kwargs = catch_trainer_kwargs(model)
+    assert any(isinstance(cb, RichModelSummary) for cb in received_trainer_kwargs["callbacks"])
+
+
+def test_when_early_stopping_patience_provided_then_early_stopping_callback_created():
+    from pytorch_lightning.callbacks import EarlyStopping
+
+    patience = 7
+    model = SimpleFeedForwardModel(hyperparameters={"early_stopping_patience": patience, **DUMMY_HYPERPARAMETERS})
+    received_trainer_kwargs = catch_trainer_kwargs(model)
+    es_callbacks = [cb for cb in received_trainer_kwargs["callbacks"] if isinstance(cb, EarlyStopping)]
+    assert len(es_callbacks) == 1
+    assert es_callbacks[0].patience == patience
+
+
+def test_when_early_stopping_patience_is_none_then_early_stopping_callback_not_created():
+    from pytorch_lightning.callbacks import EarlyStopping
+
+    model = SimpleFeedForwardModel(hyperparameters={"early_stopping_patience": None, **DUMMY_HYPERPARAMETERS})
+    received_trainer_kwargs = catch_trainer_kwargs(model)
+    es_callbacks = [cb for cb in received_trainer_kwargs["callbacks"] if isinstance(cb, EarlyStopping)]
+    assert len(es_callbacks) == 0
+
+
+def test_when_custom_trainer_kwargs_given_then_trainer_receives_them():
+    trainer_kwargs = {"max_epochs": 5, "limit_train_batches": 100}
+    model = PatchTSTModel(hyperparameters={"trainer_kwargs": trainer_kwargs, **DUMMY_HYPERPARAMETERS})
+    received_trainer_kwargs = catch_trainer_kwargs(model)
+    for k, v in trainer_kwargs.items():
+        assert received_trainer_kwargs[k] == v
diff --git a/timeseries/tests/unittests/models/test_multi_window_model.py b/timeseries/tests/unittests/models/test_multi_window_model.py
--- a/timeseries/tests/unittests/models/test_multi_window_model.py
+++ b/timeseries/tests/unittests/models/test_multi_window_model.py
@@ -36,7 +36,9 @@ def test_when_mw_model_trained_then_oof_predictions_and_stats_are_saved(
     temp_model_path, prediction_length, num_val_windows
 ):
     val_splitter = ExpandingWindowSplitter(prediction_length=prediction_length, num_val_windows=num_val_windows)
-    mw_model = get_multi_window_deepar(path=temp_model_path, prediction_length=prediction_length)
+    mw_model = get_multi_window_deepar(
+        path=temp_model_path, prediction_length=prediction_length, freq=DUMMY_TS_DATAFRAME.freq
+    )
     mw_model.fit(train_data=DUMMY_TS_DATAFRAME, val_splitter=val_splitter)
     assert len(mw_model.get_oof_predictions()) == num_val_windows
@@ -54,7 +56,7 @@ def test_when_val_data_passed_to_mw_model_fit_then_exception_is_raised(temp_mode
 def test_when_saved_model_moved_then_model_can_be_loaded_with_updated_path():
     original_path = tempfile.mkdtemp() + os.sep
-    model = get_multi_window_deepar(path=original_path)
+    model = get_multi_window_deepar(path=original_path, freq=DUMMY_TS_DATAFRAME.freq)
     model.fit(train_data=DUMMY_TS_DATAFRAME)
     model.save()
     new_path = tempfile.mkdtemp() + os.sep
diff --git a/timeseries/tests/unittests/test_evaluator.py b/timeseries/tests/unittests/test_evaluator.py
--- a/timeseries/tests/unittests/test_evaluator.py
+++ b/timeseries/tests/unittests/test_evaluator.py
@@ -2,8 +2,7 @@
 import pandas as pd
 import pytest
 from gluonts.evaluation import Evaluator as GluonTSEvaluator
-from gluonts.evaluation import make_evaluation_predictions
-from gluonts.model.forecast import SampleForecast
+from gluonts.model.forecast import QuantileForecast

 from autogluon.timeseries import TimeSeriesPredictor
 from autogluon.timeseries.evaluator import TimeSeriesEvaluator, in_sample_abs_seasonal_error
@@ -47,36 +46,45 @@
     return pred._trainer.load_model("DeepAR")


+def to_gluonts_forecast(forecast_df, freq):
+    forecast_list = []
+    for item_id, fcast in forecast_df.groupby(level="item_id", sort=False):
+        start_date = fcast.index[0][1].to_period(freq=freq)
+        qf = QuantileForecast(
+            forecast_arrays=fcast.values.T,
+            start_date=start_date,
+            forecast_keys=fcast.columns,
+            item_id=item_id,
+        )
+        forecast_list.append(qf)
+    return forecast_list
+
+
+def to_gluonts_test_set(data):
+    ts_list = []
+    for item_id, ts in data.groupby(level="item_id", sort=False):
+        ts = ts.loc[item_id]["target"]
+        ts.index = ts.index.to_period(freq=data.freq)
+        ts_list.append(ts)
+    return ts_list
+
+
 @pytest.mark.parametrize("metric_name", GLUONTS_PARITY_METRICS)
 def test_when_given_learned_model_when_evaluator_called_then_output_equal_to_gluonts(metric_name, deepar_trained):
     model = deepar_trained
+    data_train, data_test = DUMMY_TS_DATAFRAME.train_test_split(model.prediction_length)

-    forecast_iter, ts_iter = make_evaluation_predictions(
-        dataset=model._to_gluonts_dataset(DUMMY_TS_DATAFRAME),
-        predictor=model.gts_predictor,
-        num_samples=100,
-    )
-    fcast_list, ts_list = list(forecast_iter), list(ts_iter)
-    prediction_length = 2
+    forecast_df = model.predict(data_train)
     seasonal_period = 3
-    forecast_index = DUMMY_TS_DATAFRAME.slice_by_timestep(-prediction_length, None).index
-    forecast_df = model._gluonts_forecasts_to_data_frame(
-        fcast_list,
-        quantile_levels=model.quantile_levels,
-        forecast_index=forecast_index,
-    )
-
     ag_evaluator = TimeSeriesEvaluator(
-        eval_metric=metric_name, prediction_length=prediction_length, eval_metric_seasonal_period=seasonal_period
+        eval_metric=metric_name, prediction_length=model.prediction_length, eval_metric_seasonal_period=seasonal_period
     )
     ag_value = ag_evaluator(DUMMY_TS_DATAFRAME, forecast_df)

+    forecast_list = to_gluonts_forecast(forecast_df, freq=data_train.freq)
+    ts_list = to_gluonts_test_set(data_test)
     gts_evaluator = GluonTSEvaluator(seasonality=seasonal_period)
-    gts_results, _ = gts_evaluator(
-        ts_iterator=ts_list,
-        fcst_iterator=fcast_list,
-    )
-
+    gts_results, _ = gts_evaluator(ts_iterator=ts_list, fcst_iterator=forecast_list)
     gts_metric_name = AG_TO_GLUONTS_METRIC.get(metric_name, metric_name)
     assert np.isclose(gts_results[gts_metric_name], ag_value, atol=1e-5)
@@ -90,26 +98,19 @@ def test_when_given_all_zero_data_when_evaluator_called_then_output_equal_to_glu
     model = deepar_trained_zero_data
     data = DUMMY_TS_DATAFRAME.copy() * 0
+    data_train, data_test = data.train_test_split(model.prediction_length)

-    forecast_iter, ts_iter = make_evaluation_predictions(
-        dataset=model._to_gluonts_dataset(data),
-        predictor=model.gts_predictor,
-        num_samples=100,
-    )
-    fcast_list, ts_list = list(forecast_iter), list(ts_iter)
-    prediction_length = 2
-    forecast_index = DUMMY_TS_DATAFRAME.slice_by_timestep(-prediction_length, None).index
-    forecast_df = model._gluonts_forecasts_to_data_frame(
-        fcast_list,
-        quantile_levels=model.quantile_levels,
-        forecast_index=forecast_index,
+    forecast_df = model.predict(data_train)
+    seasonal_period = 3
+    ag_evaluator = TimeSeriesEvaluator(
+        eval_metric=metric_name, prediction_length=model.prediction_length, eval_metric_seasonal_period=seasonal_period
     )
-
-    ag_evaluator = TimeSeriesEvaluator(eval_metric=metric_name, prediction_length=prediction_length)
     ag_value = ag_evaluator(data, forecast_df)

-    gts_evaluator = GluonTSEvaluator()
-    gts_results, _ = gts_evaluator(ts_iterator=ts_list, fcst_iterator=fcast_list)
+    forecast_list = to_gluonts_forecast(forecast_df, freq=data_train.freq)
+    ts_list = to_gluonts_test_set(data_test)
+    gts_evaluator = GluonTSEvaluator(seasonality=seasonal_period)
+    gts_results, _ = gts_evaluator(ts_iterator=ts_list, fcst_iterator=forecast_list)
     gts_metric_name = AG_TO_GLUONTS_METRIC.get(metric_name, metric_name)
     assert np.isclose(gts_results[gts_metric_name], ag_value, atol=1e-5, equal_nan=True)
@@ -118,39 +119,19 @@ def test_when_given_all_zero_data_when_evaluator_called_then_output_equal_to_glu
 @pytest.mark.parametrize("metric_name", GLUONTS_PARITY_METRICS)
 def test_when_given_zero_forecasts_when_evaluator_called_then_output_equal_to_gluonts(metric_name, deepar_trained):
     model = deepar_trained
-    forecast_iter, ts_iter = make_evaluation_predictions(
-        dataset=model._to_gluonts_dataset(DUMMY_TS_DATAFRAME),
-        predictor=model.gts_predictor,
-        num_samples=100,
-    )
-    fcast_list, ts_list = list(forecast_iter), list(ts_iter)
-
-    zero_forecast_list = []
-    for s in fcast_list:
-        zero_forecast_list.append(
-            SampleForecast(
-                samples=np.zeros_like(s.samples),  # noqa
-                start_date=pd.Period(s.start_date, freq=s.freq),
-                item_id=s.item_id,
-            )
-        )
-    prediction_length = 2
-    seasonal_period = 3
-    forecast_index = DUMMY_TS_DATAFRAME.slice_by_timestep(-prediction_length, None).index
-    forecast_df = model._gluonts_forecasts_to_data_frame(
-        zero_forecast_list,
-        quantile_levels=model.quantile_levels,
-        forecast_index=forecast_index,
-    )
+    data_train, data_test = DUMMY_TS_DATAFRAME.train_test_split(model.prediction_length)
+    forecast_df = model.predict(data_train) * 0

+    seasonal_period = 3
     ag_evaluator = TimeSeriesEvaluator(
-        eval_metric=metric_name, prediction_length=prediction_length, eval_metric_seasonal_period=seasonal_period
+        eval_metric=metric_name, prediction_length=model.prediction_length, eval_metric_seasonal_period=seasonal_period
     )
     ag_value = ag_evaluator(DUMMY_TS_DATAFRAME, forecast_df)

+    forecast_list = to_gluonts_forecast(forecast_df, freq=data_train.freq)
+    ts_list = to_gluonts_test_set(data_test)
     gts_evaluator = GluonTSEvaluator(seasonality=seasonal_period)
-    gts_results, _ = gts_evaluator(ts_iterator=ts_list, fcst_iterator=zero_forecast_list)
-
+    gts_results, _ = gts_evaluator(ts_iterator=ts_list, fcst_iterator=forecast_list)
     gts_metric_name = AG_TO_GLUONTS_METRIC.get(metric_name, metric_name)
     assert np.isclose(gts_results[gts_metric_name], ag_value, atol=1e-5)
[timeseries] Fix slow inference of GluonTS models on large datasets

Inference can become extremely slow for GluonTS models on datasets with >20K item_ids. The problem arises because of the inefficient `.loc[item_id]` access in https://github.com/autogluon/autogluon/blob/master/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py#L85. This issue can be fixed by replacing the slow `.loc[item_id]` accesses with a single `groupby(level="item_id")` operation.
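The gist of the fix (which the patch above implements with an `indptr` array rather than `groupby`) can be illustrated outside of GluonTS. A rough sketch; the helper name and variables below are made up for illustration:

```
import numpy as np
import pandas as pd

# Instead of calling df.loc[item_id] once per item (a full index lookup each
# time), precompute the start:end offsets of every item in the row-sorted
# frame once, then slice the underlying numpy array in O(1) per item.
def make_indptr(item_index: pd.Index):
    sizes = item_index.value_counts(sort=False)  # groups in order of appearance
    indptr = np.append(0, sizes.values.cumsum())
    return sizes.index, indptr

# usage (hypothetical):
#   item_ids, indptr = make_indptr(df.index.get_level_values("item_id"))
#   values = df["target"].to_numpy()
#   series_j = values[indptr[j]:indptr[j + 1]]  # j-th item's time series
```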
2023-10-10T13:07:20Z
[]
[]
autogluon/autogluon
3,769
autogluon__autogluon-3769
[ "1493" ]
49ada1e6e0e782480636f2a199b8ff2f1c014e7f
diff --git a/tabular/src/autogluon/tabular/experimental/__init__.py b/tabular/src/autogluon/tabular/experimental/__init__.py
new file mode 100644
--- /dev/null
+++ b/tabular/src/autogluon/tabular/experimental/__init__.py
@@ -0,0 +1,2 @@
+from ._tabular_classifier import TabularClassifier
+from ._tabular_regressor import TabularRegressor
diff --git a/tabular/src/autogluon/tabular/experimental/_scikit_mixin.py b/tabular/src/autogluon/tabular/experimental/_scikit_mixin.py
new file mode 100644
--- /dev/null
+++ b/tabular/src/autogluon/tabular/experimental/_scikit_mixin.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import pandas as pd
+from sklearn.utils.validation import check_array, check_is_fitted
+
+
+class ScikitMixin:
+    def _get_init_args(self, problem_type: str) -> dict:
+        init_args = self.init_args
+        if init_args is None:
+            init_args = dict()
+        init_args = init_args.copy()
+        if "label" not in init_args:
+            init_args["label"] = "_target_"
+        if "problem_type" not in init_args:
+            init_args["problem_type"] = problem_type
+        if "eval_metric" not in init_args:
+            init_args["eval_metric"] = self.eval_metric
+        if "path" not in init_args:
+            init_args["path"] = self.path
+        if "verbosity" not in init_args:
+            init_args["verbosity"] = self.verbosity
+        return init_args
+
+    def _get_fit_args(self) -> dict:
+        fit_args = self.fit_args
+        if fit_args is None:
+            fit_args = dict()
+        fit_args = fit_args.copy()
+
+        if "time_limit" not in fit_args:
+            fit_args["time_limit"] = self.time_limit
+        if "presets" not in fit_args:
+            fit_args["presets"] = self.presets
+        if "hyperparameters" not in fit_args:
+            fit_args["hyperparameters"] = self.hyperparameters
+        if fit_args["time_limit"] is None:
+            # TODO: This isn't technically right if the user specified `None`. Can fix in future by setting Predictor's default `time_limit="auto"`
+            fit_args.pop("time_limit")
+        return fit_args
+
+    def _validate_input(self, X):
+        check_is_fitted(self)
+        # Input validation
+        X = check_array(X)
+        if X.shape[1] != self.n_features_in_:
+            raise ValueError(f"Inconsistent number of features between fit and predict calls: ({self.n_features_in_}, {X.shape[1]})")
+        return X
+
+    def _combine_X_y(self, X, y) -> pd.DataFrame:
+        label = self.predictor_.label
+        X = pd.DataFrame(X)
+        assert label not in list(X.columns), f"Cannot have column named {label}. Please rename the column to a different value."
+        X[label] = y
+        return X
+
+    def leaderboard(self, X, y, **kwargs) -> pd.DataFrame:
+        data = self._combine_X_y(X=X, y=y)
+        return self.predictor_.leaderboard(data=data, **kwargs)
diff --git a/tabular/src/autogluon/tabular/experimental/_tabular_classifier.py b/tabular/src/autogluon/tabular/experimental/_tabular_classifier.py
new file mode 100644
--- /dev/null
+++ b/tabular/src/autogluon/tabular/experimental/_tabular_classifier.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+from typing import List
+
+import pandas as pd
+from sklearn.base import BaseEstimator, ClassifierMixin
+from sklearn.utils.multiclass import unique_labels
+from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
+
+from autogluon.core.metrics import Scorer
+
+from .. import TabularPredictor
+from ._scikit_mixin import ScikitMixin
+
+
+class TabularClassifier(BaseEstimator, ClassifierMixin, ScikitMixin):
+    def __init__(
+        self,
+        eval_metric: str | Scorer = None,
+        time_limit: float = None,
+        presets: List[str] | str = None,
+        hyperparameters: dict | str = None,
+        path: str = None,
+        verbosity: int = 2,
+        init_args: dict = None,
+        fit_args: dict = None,
+    ):
+        self.eval_metric = eval_metric
+        self.time_limit = time_limit
+        self.presets = presets
+        self.hyperparameters = hyperparameters
+        self.path = path
+        self.verbosity = verbosity
+        self.init_args = init_args
+        self.fit_args = fit_args
+
+    def fit(self, X, y):
+        # Check that X and y have correct shape
+        # X, y = check_X_y(X, y)  # Commented out to allow for object dtypes
+
+        # Store the classes seen during fit
+        self.n_features_in_ = X.shape[1]
+        self.classes_ = unique_labels(y)
+
+        if len(self.classes_) == 1:
+            raise ValueError("Classifier can't train when only one class is present.")
+        if len(self.classes_) == 2:
+            problem_type = "binary"
+        else:
+            problem_type = "multiclass"
+
+        init_args = self._get_init_args(problem_type=problem_type)
+        fit_args = self._get_fit_args()
+
+        self.predictor_ = TabularPredictor(**init_args)
+
+        train_data = self._combine_X_y(X=X, y=y)
+
+        self.predictor_.fit(train_data, **fit_args)
+
+        # Return the classifier
+        return self
+
+    def predict(self, X):
+        # Check if fit has been called
+        check_is_fitted(self)
+        # Input validation
+        X = check_array(X)
+        if X.shape[1] != self.n_features_in_:
+            raise ValueError(f"Inconsistent number of features between fit and predict calls: ({self.n_features_in_}, {X.shape[1]})")
+
+        data = pd.DataFrame(X)
+        y_pred = self.predictor_.predict(data=data).to_numpy()
+        return y_pred
+
+    def predict_proba(self, X):
+        X = self._validate_input(X=X)
+        data = pd.DataFrame(X)
+        y_pred_proba = self.predictor_.predict_proba(data=data).to_numpy()
+        return y_pred_proba
diff --git a/tabular/src/autogluon/tabular/experimental/_tabular_regressor.py b/tabular/src/autogluon/tabular/experimental/_tabular_regressor.py
new file mode 100644
--- /dev/null
+++ b/tabular/src/autogluon/tabular/experimental/_tabular_regressor.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from typing import List
+
+import pandas as pd
+from sklearn.base import BaseEstimator, RegressorMixin
+from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
+
+from autogluon.core.metrics import Scorer
+
+from .. import TabularPredictor
+from ._scikit_mixin import ScikitMixin
+
+
+class TabularRegressor(BaseEstimator, RegressorMixin, ScikitMixin):
+    def __init__(
+        self,
+        eval_metric: str | Scorer = None,
+        time_limit: float = None,
+        presets: List[str] | str = None,
+        hyperparameters: dict | str = None,
+        path: str = None,
+        verbosity: int = 2,
+        init_args: dict = None,
+        fit_args: dict = None,
+    ):
+        self.eval_metric = eval_metric
+        self.time_limit = time_limit
+        self.presets = presets
+        self.hyperparameters = hyperparameters
+        self.path = path
+        self.verbosity = verbosity
+        self.init_args = init_args
+        self.fit_args = fit_args
+
+    def fit(self, X, y):
+        # Check that X and y have correct shape
+        # X, y = check_X_y(X, y)  # Commented out to allow for object dtypes
+
+        self.n_features_in_ = X.shape[1]
+
+        init_args = self._get_init_args(problem_type="regression")
+        fit_args = self._get_fit_args()
+
+        self.predictor_ = TabularPredictor(**init_args)
+
+        train_data = self._combine_X_y(X=X, y=y)
+
+        self.predictor_.fit(train_data, **fit_args)
+
+        # Return the regressor
+        return self
+
+    def predict(self, X):
+        # Check if fit has been called
+        check_is_fitted(self)
+        # Input validation
+        X = check_array(X)
+        if X.shape[1] != self.n_features_in_:
+            raise ValueError(f"Inconsistent number of features between fit and predict calls: ({self.n_features_in_}, {X.shape[1]})")
+
+        data = pd.DataFrame(X)
+        y_pred = self.predictor_.predict(data=data).to_numpy()
+        return y_pred
diff --git a/tabular/tests/conftest.py b/tabular/tests/conftest.py
--- a/tabular/tests/conftest.py
+++ b/tabular/tests/conftest.py
@@ -145,6 +145,7 @@ def fit_and_validate_dataset(
         allowed_dataset_features=None,
         expected_stacked_overfitting_at_test=None,
         expected_stacked_overfitting_at_val=None,
+        scikit_api=False,
     ):
         if compiler_configs is None:
             compiler_configs = {}
@@ -170,7 +171,7 @@ def fit_and_validate_dataset(
             init_args["path"] = PathConverter.to_absolute(path=init_args["path"])
             assert PathConverter._is_absolute(path=init_args["path"])
         save_path = init_args["path"]
-        predictor = FitHelper.fit_dataset(train_data=train_data, init_args=init_args, fit_args=fit_args, sample_size=sample_size)
+        predictor = FitHelper.fit_dataset(train_data=train_data, init_args=init_args, fit_args=fit_args, sample_size=sample_size, scikit_api=scikit_api)
         if compile:
             predictor.compile(models="all", compiler_configs=compiler_configs)
             predictor.persist(models="all")
@@ -231,10 +232,28 @@ def fit_and_validate_dataset_with_cascade(
         shutil.rmtree(predictor.path, ignore_errors=True)  # Delete AutoGluon output directory to ensure runs' information has been removed.

     @staticmethod
-    def fit_dataset(train_data, init_args, fit_args, sample_size=None) -> TabularPredictor:
+    def fit_dataset(train_data, init_args, fit_args, sample_size=None, scikit_api=False) -> TabularPredictor:
         if sample_size is not None and sample_size < len(train_data):
             train_data = train_data.sample(n=sample_size, random_state=0)
-        return TabularPredictor(**init_args).fit(train_data, **fit_args)
+        if scikit_api:
+            from autogluon.tabular.experimental import TabularClassifier, TabularRegressor
+
+            if "problem_type" in init_args:
+                problem_type = init_args["problem_type"]
+            else:
+                problem_type = infer_problem_type(train_data[init_args["label"]])
+            X = train_data.drop(columns=[init_args["label"]])
+            y = train_data[init_args["label"]]
+            if problem_type in [REGRESSION]:
+                regressor = TabularRegressor(init_args=init_args, fit_args=fit_args)
+                regressor.fit(X, y)
+                return regressor.predictor_
+            else:
+                classifier = TabularClassifier(init_args=init_args, fit_args=fit_args)
+                classifier.fit(X, y)
+                return classifier.predictor_
+        else:
+            return TabularPredictor(**init_args).fit(train_data, **fit_args)


 # Helper functions for training models outside of predictors
diff --git a/tabular/tests/unittests/experimental/test_scikit_api.py b/tabular/tests/unittests/experimental/test_scikit_api.py
new file mode 100644
--- /dev/null
+++ b/tabular/tests/unittests/experimental/test_scikit_api.py
@@ -0,0 +1,36 @@
+from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION
+from autogluon.core.metrics import METRICS
+from autogluon.tabular.models.lgb.lgb_model import LGBModel
+
+
+def test_scikit_api_binary(fit_helper):
+    """Additionally tests that all binary metrics work"""
+    fit_args = dict(
+        hyperparameters={LGBModel: {}},
+    )
+    dataset_name = "adult"
+    extra_metrics = list(METRICS[BINARY])
+
+    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args, extra_metrics=extra_metrics, scikit_api=True)
+
+
+def test_scikit_api_multiclass(fit_helper):
+    """Additionally tests that all multiclass metrics work"""
+    fit_args = dict(
+        hyperparameters={LGBModel: {}},
+    )
+    extra_metrics = list(METRICS[MULTICLASS])
+
+    dataset_name = "covertype_small"
+    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args, extra_metrics=extra_metrics, scikit_api=True)
+
+
+def test_scikit_api_regression(fit_helper):
+    """Additionally tests that all regression metrics work"""
+    fit_args = dict(
+        hyperparameters={LGBModel: {}},
+    )
+    extra_metrics = list(METRICS[REGRESSION])
+
+    dataset_name = "ames"
+    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args, extra_metrics=extra_metrics, scikit_api=True)
Add scikit-learn compatible API

Related: https://github.com/awslabs/autogluon/issues/1479

Add a scikit-learn compatible API wrapper of TabularPredictor:
- [ ] TabularClassifier
- [ ] TabularRegressor

Required functionality (may need more than listed):
- [ ] init API
- [ ] fit API
- [ ] predict API
- [ ] works in sklearn pipelines
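One way to pin down what "scikit-learn compatible" means concretely is scikit-learn's own estimator-check suite; a wrapper that passes (or mostly passes) it will behave correctly in pipelines and model-selection utilities. A possible smoke test, assuming the `TabularClassifier` added by the patch above (heavyweight AutoML estimators typically need some checks relaxed or skipped, so treat this as a starting point rather than a guaranteed-green test):

```
from sklearn.utils.estimator_checks import check_estimator

from autogluon.tabular.experimental import TabularClassifier

# Runs scikit-learn's API-conformance checks against an instance.
check_estimator(TabularClassifier(time_limit=10))
```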
@Innixma Thanks. Here is the code. Its main purpose is to be usable with sklearn's Pipeline, MultiOutputRegressor, and GridSearchCV.

```
import numpy as np
import pandas as pd
import autogluon as ag
from autogluon.tabular import TabularPredictor


class AutoGluonPredictor(TabularPredictor):
    def __init__(self, label='', problem_type=None, eval_metric=None, path=None, verbosity=0,
                 sample_weight=None, weight_evaluation=False, groups=None, presets='best_quality'):
        super().__init__(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path,
                         verbosity=verbosity, sample_weight=sample_weight,
                         weight_evaluation=weight_evaluation, groups=groups)
        self.presets = presets

    def fit(self, X, y=None, sample_weight=None, check_input=True):
        X_df = pd.DataFrame(X)
        y_df = pd.DataFrame(y)
        X_names = X_df.columns
        y_names = y_df.columns

        # This code has not been well considered, but is here to resolve some errors.
        if len(X_names) != len(set(X_names)):
            X_names = [f'X{i}' for i in range(len(X_names))]
        else:
            if len(list(set(X_names) & set(y_names))) >= 1:
                X_names = [f'X{i}' for i in range(len(X_names))]
                y_names = ['y0']
        X_df.columns = X_names
        y_df.columns = y_names
        y_name = y_names[0]
        self.X_names = X_names
        self.y_names = y_names
        ###########

        self.__init__(label=y_name)
        train_data = pd.concat([X_df, y_df], axis=1)
        super().fit(train_data, presets=self.presets)
        return self

    def predict(self, X, y=None, sample_weight=None, check_input=True):
        X_df = pd.DataFrame(X)
        X_df.columns = self.X_names
        return super().predict(X_df)

    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self

    def get_params(self, deep=True):
        """
        Parameters
        ----------
        deep : Ignored. (for compatibility with sklearn)

        Returns
        ----------
        self : returns a dictionary of parameters.
        """
        params = {}
        return params
```

I'm not planning to use GridSearchCV with AutoGluon, but I have defined set_params so the class can be used with the same notation as other estimators (like Lasso). Also, in my particular usage, I sometimes connect variable selection with Pipeline before AutoGluon, so I added special processing in fit for X_names and y_names. If you don't need it, please ignore it. The usage example is described below.

```
estimator = AutoGluonPredictor()
estimator = MultiOutputRegressor(estimator)
param_grid = {}
model_name = 'AutoGluon'

from sklearn.pipeline import Pipeline
pipe = Pipeline([('selector', SelectFromModel(Lasso())),
                 ('estimator', estimator)])
gsv = GridSearchCV(pipe, param_grid={})  # No-tuning
gsv.fit(X, y)  # No-tuning

otherpipe = Pipeline([('selector', SelectFromModel(Lasso())),
                      ('estimator', Lasso())])
othergsv = GridSearchCV(otherpipe, param_grid={'alpha': [0.1, 0.01]})
othergsv.fit(X, y)
```

@Innixma I'm not sure whether it would be better to create a separate issue, but I've noticed a few things regarding autogluon's compatibility with the sklearn API.
Fixing those issues will allow using autogluon as a drop-in replacement with the following tools from the sklearn ecosystem:

a) **deepchecks** (https://github.com/deepchecks/deepchecks), here is the discussion https://github.com/deepchecks/deepchecks/issues/2364 but the gist of the issue is that autogluon lacks the following from the sklearn API:

- `classes_` property (I think it could be provided as an alias for `class_labels`)
- `feature_importances_` property (I think it could be provided as an alias for `feature_importance` with some defaults; I'm not sure what is better, `original` or `transformed`)

b) **cleanlab** (https://github.com/cleanlab/cleanlab)

I've tried to run it with autogluon:

```
# cleanlab works with **any classifier**. Yup, you can use sklearn/PyTorch/TensorFlow/XGBoost/etc.
cl = cleanlab.classification.CleanLearning(sklearn.YourFavoriteClassifier())

# cleanlab finds data and label issues in **any dataset**... in ONE line of code!
label_issues = cl.find_label_issues(data, labels)

# cleanlab trains a robust version of your model that works more reliably with noisy data.
cl.fit(data, labels)
```

but I got an error that the estimator is not clonable via `sklearn.base.clone` (https://docs.cleanlab.ai/stable/cleanlab/classification.html)

details: https://github.com/cleanlab/cleanlab/blob/00776646a7764e07cc4078e8c25e873ee413c5f7/cleanlab/count.py Line 997:

```
for k, (cv_train_idx, cv_holdout_idx) in enumerate(kf.split(X=labels, y=labels)):
    try:
        clf_copy = sklearn.base.clone(clf)  # fresh untrained copy of the model
    except Exception:
        raise ValueError(
            "`clf` must be clonable via: sklearn.base.clone(clf). "
            "You can either implement instance method `clf.get_params()` to produce a fresh untrained copy of this model, "
            "or you can implement the cross-validation outside of cleanlab "
            "and pass in the obtained `pred_probs` to skip cleanlab's internal cross-validation"
        )
```

I didn't look into it, but maybe it is not hard to fix, as the autogluon model class seems to provide get_params: https://auto.gluon.ai/stable/_modules/autogluon/core/models/abstract/abstract_model.html#AbstractModel.get_params; unfortunately `TabularPredictor` doesn't have it.

Hi @Innixma @jckkvs I've implemented draft wrappers for:

- classifier https://colab.research.google.com/drive/1tMVNbYhu9sLsUWdq1tPabid7-eb8ZPo3?usp=sharing (autogluon with cleanlab, which requires the scikit-learn API)
- regressor https://colab.research.google.com/drive/1zXUTrkKmAG472VgxwCwOGyevE7BDmWgy?usp=sharing (GridSearchCV, sklearn pipeline example)

Both wrappers follow the same pattern: instead of "inheritance" from autogluon like in jckkvs's example, I use composition and inherit only from the "scikit mixins".
**Classifier:**

```
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import NotFittedError

from autogluon.tabular import TabularPredictor


class AutogluonClassifier(BaseEstimator, ClassifierMixin):
    def __init__(self, eval_metric=None, time_limit=60, hyperparameters=None, verbosity=2):
        self.eval_metric = eval_metric
        self.time_limit = time_limit
        self.hyperparameters = hyperparameters
        self.verbosity = verbosity
        self.tabular_predictor = None
        self.classes_ = None
        self.n_features_in_ = None
        self.n_classes_ = None
        self.feature_names_in_ = None

    def fit(self, X, y):
        self.n_features_in_ = X.shape[1]
        self.feature_names_in_ = X.columns.tolist()
        X = pd.DataFrame(X)
        X['target'] = y
        self.tabular_predictor = TabularPredictor(label='target', eval_metric=self.eval_metric)
        self.tabular_predictor.fit(train_data=X, time_limit=self.time_limit,
                                   hyperparameters=self.hyperparameters, verbosity=self.verbosity)
        self.classes_ = self.tabular_predictor._learner.label_cleaner.ordered_class_labels
        self.n_classes_ = len(self.classes_)
        return self  # fit must return self for sklearn compatibility

    def predict(self, X):
        if self.tabular_predictor is None:
            raise NotFittedError("Model has not been fitted. Call 'fit' before 'predict'.")
        return self.tabular_predictor.predict(X)

    def predict_proba(self, X):
        if self.tabular_predictor is None:
            raise NotFittedError("Model has not been fitted. Call 'fit' before 'predict_proba'.")
        return self.tabular_predictor.predict_proba(X)
```

**Regressor:**

```
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.exceptions import NotFittedError

from autogluon.tabular import TabularPredictor


class AutogluonRegressor(BaseEstimator, RegressorMixin):
    def __init__(self, eval_metric=None, time_limit=60, hyperparameters=None, verbosity=2):
        self.eval_metric = eval_metric
        self.time_limit = time_limit
        self.hyperparameters = hyperparameters
        self.verbosity = verbosity
        self.tabular_predictor = None

    def fit(self, X, y):
        X = pd.DataFrame(X)
        X['target'] = y
        self.tabular_predictor = TabularPredictor(problem_type='regression', label='target',
                                                  eval_metric=self.eval_metric)
        self.tabular_predictor.fit(train_data=X, time_limit=self.time_limit,
                                   hyperparameters=self.hyperparameters, verbosity=self.verbosity)
        return self  # fit must return self for sklearn compatibility

    def predict(self, X):
        if self.tabular_predictor is None:
            raise NotFittedError("Model has not been fitted. Call 'fit' before 'predict'.")
        return self.tabular_predictor.predict(X)
```

Note that this is a draft version and it requires further testing/checking. It could also be extended in two ways:

- expose more autogluon "internal parameters" in the wrapper __init__, e.g. `sample_weight`
- add more scikit-learn methods and attributes, e.g. `feature_importances_`

I think those wrappers could be added to the Tutorials section (like this extension: https://auto.gluon.ai/stable/tutorials/tabular/advanced/tabular-multilabel.html) with some additional info regarding the differences between the autogluon and scikit-learn APIs and the design considerations.

Additional rationale:

- additional tutorials/documentation releases don't have to be tightly coupled with autogluon releases
- I don't think it is possible to create a perfect wrapper for everybody; either some internals will be hidden or the init will be bloated, so these wrappers are a starter kit
- the tutorial format makes it easier to provide some nuance, e.g. with GridSearchCV it is better not to provide `scoring` to it, but to initialize the autogluon wrapper with the proper `eval_metric`

What do you think about it?

Example of a wrapper used in AutoGluon-TimeSeries for TabularPredictor: https://github.com/autogluon/autogluon/blob/4cb0fc82ecd89025a37f8c474a23896b089afb2b/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py#L30
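To make the clonability point from the cleanlab discussion concrete, here is a quick check against the draft `AutogluonClassifier` above. This is a sketch: it assumes the class is defined exactly as shown, and relies only on standard `sklearn.base` behavior (`BaseEstimator` derives `get_params()` from the `__init__` signature, which is what `clone()` needs):

```python
import sklearn.base

clf = AutogluonClassifier(time_limit=30)
clf_copy = sklearn.base.clone(clf)  # fresh, untrained copy; this is what cleanlab requires
print(clf_copy.get_params())        # e.g. {'eval_metric': None, 'hyperparameters': None, 'time_limit': 30, 'verbosity': 2}
```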
2023-11-28T03:05:35Z
[]
[]
autogluon/autogluon
3,859
autogluon__autogluon-3859
[ "3847" ]
26ef76d0a5c11dc256a4962154fce9789af1bfe0
diff --git a/multimodal/src/autogluon/multimodal/models/ft_transformer.py b/multimodal/src/autogluon/multimodal/models/ft_transformer.py --- a/multimodal/src/autogluon/multimodal/models/ft_transformer.py +++ b/multimodal/src/autogluon/multimodal/models/ft_transformer.py @@ -1,3 +1,5 @@ +import os +import tempfile from typing import List, Optional import torch @@ -457,6 +459,8 @@ def __init__( additive_attention: Optional[bool] = False, share_qv_weights: Optional[bool] = False, pooling_mode: Optional[str] = "cls", + checkpoint_name: str = None, + pretrained: bool = False, ) -> None: """ Parameters @@ -594,12 +598,25 @@ def __init__( initialization="uniform", ) - # init weights + # init tokenizer and head weights if self.numerical_feature_tokenizer: self.numerical_adapter.apply(init_weights) if self.categorical_feature_tokenizer: self.categorical_adapter.apply(init_weights) self.head.apply(init_weights) + # init transformer backbone from provided checkpoint + from autogluon.multimodal.utils.download import download + + if pretrained and checkpoint_name: + if os.path.exists(checkpoint_name): + ckpt = torch.load(checkpoint_name) + else: + with tempfile.TemporaryDirectory() as tmpdirname: + checkpoint_path = os.path.join(tmpdirname, "./ft_transformer_pretrained.ckpt") + download(checkpoint_name, checkpoint_path) + ckpt = torch.load(checkpoint_path) + self.transformer.load_state_dict(ckpt["state_dict"]) + self.name_to_id = self.get_layer_ids() @property diff --git a/multimodal/src/autogluon/multimodal/models/utils.py b/multimodal/src/autogluon/multimodal/models/utils.py --- a/multimodal/src/autogluon/multimodal/models/utils.py +++ b/multimodal/src/autogluon/multimodal/models/utils.py @@ -237,8 +237,11 @@ def group_param_names( # split blocks at the first level children_prefix = [] for n in selected_names: - child_name = n[len(model_prefix) + 1 :].split(".")[0] - child_prefix = f"{model_prefix}.{child_name}" + if model_prefix is not None: + child_name = n[len(model_prefix) + 1 :].split(".")[0] + child_prefix = f"{model_prefix}.{child_name}" + else: + child_prefix = n.split(".")[0] if child_prefix not in children_prefix: children_prefix.append(child_prefix) diff --git a/multimodal/src/autogluon/multimodal/utils/model.py b/multimodal/src/autogluon/multimodal/utils/model.py --- a/multimodal/src/autogluon/multimodal/utils/model.py +++ b/multimodal/src/autogluon/multimodal/utils/model.py @@ -372,6 +372,8 @@ def create_model( additive_attention=OmegaConf.select(model_config, "additive_attention", default=False), share_qv_weights=OmegaConf.select(model_config, "share_qv_weights", default=False), pooling_mode=OmegaConf.select(model_config, "pooling_mode", default="cls"), + checkpoint_name=model_config.checkpoint_name, + pretrained=pretrained, ) elif model_name.lower().startswith(SAM): model = SAMForSemanticSegmentation(
diff --git a/multimodal/tests/unittests/predictor/test_predictor.py b/multimodal/tests/unittests/predictor/test_predictor.py --- a/multimodal/tests/unittests/predictor/test_predictor.py +++ b/multimodal/tests/unittests/predictor/test_predictor.py @@ -677,6 +677,29 @@ def test_load_ckpt(): npt.assert_equal(predictions_prob, predictions2_prob) +def test_fttransformer_load_ckpt(): + dataset = ALL_DATASETS["petfinder"] + metric_name = dataset.metric + + predictor = MultiModalPredictor( + label=dataset.label_columns[0], + problem_type=dataset.problem_type, + eval_metric=metric_name, + ) + hyperparameters = { + "model.names": ["ft_transformer"], + "model.ft_transformer.checkpoint_name": "https://automl-mm-bench.s3.amazonaws.com/ft_transformer_pretrained_ckpt/iter_2k.ckpt", + "data.categorical.convert_to_text": False, # ensure the categorical model is used. + "data.numerical.convert_to_text": False, # ensure the numerical model is used. + } + predictor.fit( + dataset.train_df, + hyperparameters=hyperparameters, + time_limit=10, + ) + predictor.evaluate(dataset.test_df) + + @pytest.mark.parametrize( "hyperparameters", [
How to read pre v1 FTTransformer weights with v1 autogluon

## Description

I have pretrained weights obtained using an older version of autogluon's FTTransformer implementation (v0.6). Now I would like to load the weights using v1. I tried something basic like the following, which worked with the older version, but I cannot load it with v1:

```python
from autogluon.tabular import TabularPredictor, TabularDataset

df_train = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')

hyperparameters = {}
hyperparameters['FT_TRANSFORMER'] = {
    "env.per_gpu_batch_size": 128,
    "env.num_workers": 0,
    "env.num_workers_evaluation": 0,
    "optimization.max_epochs": 3,
    'finetune_on': './iter_2k.ckpt',
}

predictor = TabularPredictor(label="class", eval_metric="roc_auc")
df_train = df_train.dropna(subset=["class"])
predictor.fit(train_data=df_train, hyperparameters=hyperparameters, time_limit=60)
```

It complains:
`'"finetune_on" is not found in the config. You may need to check the overrides.`
@taoyang1122 would be best to answer. My understanding is that we improved the architecture implementation of FT-Transformer in v1.0, and thus the weights are no longer compatible between v1.0 and prior versions; you would need to pre-train new weights for v1.0.
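The change that landed exposes checkpoint loading through the `model.ft_transformer.checkpoint_name` hyperparameter of `MultiModalPredictor` (a local path or URL), as exercised in the test patch above. Here is a sketch, assuming the checkpoint's `state_dict` matches the v1.0 transformer backbone (pre-v1 weights would still need re-training, per the comment above); `train_df` stands in for a labeled training DataFrame:

```python
from autogluon.multimodal import MultiModalPredictor

hyperparameters = {
    "model.names": ["ft_transformer"],
    # Local path or URL to a pretrained FT-Transformer backbone checkpoint.
    "model.ft_transformer.checkpoint_name": "./iter_2k.ckpt",
    "data.categorical.convert_to_text": False,  # ensure the categorical model is used
    "data.numerical.convert_to_text": False,    # ensure the numerical model is used
}
predictor = MultiModalPredictor(label="class")
predictor.fit(train_df, hyperparameters=hyperparameters, time_limit=60)
```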
2024-01-13T00:29:43Z
[]
[]
autogluon/autogluon
3,930
autogluon__autogluon-3930
[ "3897" ]
06e5f7420edf43b853efcbf739d1dfc96dc33d6a
diff --git a/core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py b/core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py --- a/core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py +++ b/core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py @@ -20,6 +20,7 @@ from ...constants import MULTICLASS, QUANTILE, REFIT_FULL_SUFFIX, REGRESSION, SOFTCLASS from ...hpo.exceptions import EmptySearchSpace +from ...pseudolabeling.pseudolabeling import assert_pseudo_column_match from ...utils.exceptions import TimeLimitExceeded from ...utils.loaders import load_pkl from ...utils.savers import save_pkl @@ -144,12 +145,12 @@ def n_children(self) -> int: def is_valid_oof(self): return self.is_fit() and (self._child_oof or self._bagged_mode) - def predict_proba_oof(self, **kwargs): + def predict_proba_oof(self, **kwargs) -> np.array: # TODO: Require is_valid == True (add option param to ignore is_valid) return self._predict_proba_oof(self._oof_pred_proba, self._oof_pred_model_repeats) @staticmethod - def _predict_proba_oof(oof_pred_proba, oof_pred_model_repeats, return_type=np.float32): + def _predict_proba_oof(oof_pred_proba, oof_pred_model_repeats, return_type=np.float32) -> np.array: oof_pred_model_repeats_without_0 = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats) if oof_pred_proba.ndim == 2: oof_pred_model_repeats_without_0 = oof_pred_model_repeats_without_0[:, None] @@ -254,7 +255,16 @@ def _fit( save_bag_folds = self.params.get("save_bag_folds", True) if k_fold == 1: - self._fit_single(X=X, y=y, model_base=model_base, use_child_oof=use_child_oof, skip_oof=_skip_oof, **kwargs) + self._fit_single( + X=X, + y=y, + X_pseudo=X_pseudo, + y_pseudo=y_pseudo, + model_base=model_base, + use_child_oof=use_child_oof, + skip_oof=_skip_oof, + **kwargs, + ) return self else: refit_folds = self.params.get("refit_folds", False) @@ -463,7 +473,7 @@ def score_with_oof(self, y, sample_weight=None): sample_weight = sample_weight[valid_indices] return self.score_with_y_pred_proba(y=y, y_pred_proba=y_pred_proba, sample_weight=sample_weight) - def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, skip_oof=False, **kwargs): + def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, skip_oof=False, X_pseudo=None, y_pseudo=None, **kwargs): if self.is_fit(): raise AssertionError("Model is already fit.") if self._n_repeats != 0: @@ -471,7 +481,20 @@ def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, skip_oof model_base.name = f"{model_base.name}S1F1" model_base.set_contexts(path_context=os.path.join(self.path, model_base.name)) time_start_fit = time.time() - model_base.fit(X=X, y=y, time_limit=time_limit, **kwargs) + + is_pseudo = X_pseudo is not None and y_pseudo is not None + # FIXME: This can lead to poor performance. Probably not bugged, but rather all pseudolabels can come from the same class... + # Consider pseudolabelling to respect the original distribution + if is_pseudo: + # FIXME: Consider use_child_oof with pseudo labels! 
Need to keep track of indices + logger.log(15, f"{len(X_pseudo)} extra rows of pseudolabeled data added to training set for {self.name}") + assert_pseudo_column_match(X=X, X_pseudo=X_pseudo) + X_fit = pd.concat([X, X_pseudo], axis=0, ignore_index=True) + y_fit = pd.concat([y, y_pseudo], axis=0, ignore_index=True) + else: + X_fit = X + y_fit = y + model_base.fit(X=X_fit, y=y_fit, time_limit=time_limit, **kwargs) model_base.fit_time = time.time() - time_start_fit model_base.predict_time = None if not skip_oof: @@ -507,7 +530,9 @@ def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, skip_oof ) time_start_predict = time.time() if model_base._get_tags().get("valid_oof", False): - self._oof_pred_proba = model_base.predict_proba_oof(X=X, y=y) + # For models with the ability to produce their own OOF, such as RandomForest OOB and KNN-LOO, + # we get their OOF predictions on the full data, then limit to the original training data. + self._oof_pred_proba = model_base.predict_proba_oof(X=X_fit, y=y_fit)[: len(X)] else: logger.warning( "\tWARNING: `use_child_oof` was specified but child model does not have a dedicated `predict_proba_oof` method. This model may have heavily overfit validation scores." diff --git a/core/src/autogluon/core/models/ensemble/fold_fitting_strategy.py b/core/src/autogluon/core/models/ensemble/fold_fitting_strategy.py --- a/core/src/autogluon/core/models/ensemble/fold_fitting_strategy.py +++ b/core/src/autogluon/core/models/ensemble/fold_fitting_strategy.py @@ -17,6 +17,7 @@ from autogluon.common.utils.s3_utils import download_s3_folder, s3_path_to_bucket_prefix, upload_s3_folder from autogluon.common.utils.try_import import try_import_ray +from ...pseudolabeling.pseudolabeling import assert_pseudo_column_match from ...ray.resources_calculator import ResourceCalculatorFactory from ...utils.exceptions import NotEnoughCudaMemoryError, NotEnoughMemoryError, TimeLimitExceeded from ..abstract.abstract_model import AbstractModel @@ -338,6 +339,7 @@ def _fit(self, model_base, time_start_fold, time_limit_fold, fold_ctx, kwargs): if is_pseudo: logger.log(15, f"{len(self.X_pseudo)} extra rows of pseudolabeled data added to training set for {fold_model.name}") + assert_pseudo_column_match(X=X_fold, X_pseudo=self.X_pseudo) X_fold = pd.concat([X_fold, self.X_pseudo], axis=0, ignore_index=True) y_fold = pd.concat([y_fold, self.y_pseudo], axis=0, ignore_index=True) diff --git a/core/src/autogluon/core/pseudolabeling/pseudolabeling.py b/core/src/autogluon/core/pseudolabeling/pseudolabeling.py --- a/core/src/autogluon/core/pseudolabeling/pseudolabeling.py +++ b/core/src/autogluon/core/pseudolabeling/pseudolabeling.py @@ -237,3 +237,25 @@ def filter_ensemble_classification(predictor, unlabeled_data: pd.DataFrame, lead test_pseudo_indices = sample_bins_uniformly(y_pred_proba=y_pred_proba_ensemble, df_indexes=pseudo_indexes) return test_pseudo_indices[test_pseudo_indices], y_pred_proba_ensemble, y_pred_ensemble + + +def assert_pseudo_column_match(X: pd.DataFrame, X_pseudo: pd.DataFrame): + """ + Raises an AssertionError if X and X_pseudo don't share the same columns. + Useful to call prior to concatenating the data together to avoid unexpected behavior. 
+ + Parameters + ---------- + X: pd.DataFrame + The original training data + X_pseudo: pd.DataFrame + Additional training data with pseudo-labelled targets + """ + if set(X.columns) != set(X_pseudo.columns): + X_unique_cols = sorted(set(X.columns).difference(X_pseudo.columns)) + X_pseudo_unique_cols = sorted(set(X_pseudo.columns).difference(X.columns)) + raise AssertionError( + f"X and X_pseudo columns are mismatched!\n" + f"\tUnexpected Columns in X_pseudo: {X_pseudo_unique_cols}\n" + f"\t Missing Columns in X_pseudo: {X_unique_cols}" + ) diff --git a/core/src/autogluon/core/trainer/abstract_trainer.py b/core/src/autogluon/core/trainer/abstract_trainer.py --- a/core/src/autogluon/core/trainer/abstract_trainer.py +++ b/core/src/autogluon/core/trainer/abstract_trainer.py @@ -46,6 +46,7 @@ StackerEnsembleModel, WeightedEnsembleModel, ) +from ..pseudolabeling.pseudolabeling import assert_pseudo_column_match from ..utils import ( compute_permutation_feature_importance, compute_weighted_metric, @@ -1245,7 +1246,7 @@ def _get_stack_column_names(self, models: List[str]) -> Tuple[List[str], int]: # You must have previously called fit() with cache_data=True # Fits _FULL versions of specified models, but does NOT link them (_FULL stackers will still use normal models as input) - def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled=None, models=None) -> List[str]: + def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled=None, models=None, **kwargs) -> List[str]: if X is None: X = self.load_X() if X_val is None: @@ -1323,6 +1324,7 @@ def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled= n_repeats=1, ensemble_type=type(model), refit_full=True, + **kwargs, ) if len(models_trained) == 1: model_refit_map[model_name] = models_trained[0] @@ -1347,7 +1349,7 @@ def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled= # Fits _FULL models and links them in the stack so _FULL models only use other _FULL models as input during stacking # If model is specified, will fit all _FULL models that are ancestors of the provided model, automatically linking them. # If no model is specified, all models are refit and linked appropriately. - def refit_ensemble_full(self, model: str | List[str] = "all") -> dict: + def refit_ensemble_full(self, model: str | List[str] = "all", **kwargs) -> dict: if model == "all": ensemble_set = self.get_model_names() elif isinstance(model, list): @@ -1365,7 +1367,7 @@ def refit_ensemble_full(self, model: str | List[str] = "all") -> dict: else: ensemble_set_valid.append(model) if ensemble_set_valid: - models_trained_full = self.refit_single_full(models=ensemble_set_valid) + models_trained_full = self.refit_single_full(models=ensemble_set_valid, **kwargs) else: models_trained_full = [] @@ -1807,6 +1809,7 @@ def _train_and_save( # Bagged model does validation on the fit level where as single models do it separately. 
Hence this if statement # is required if not isinstance(model, BaggedEnsembleModel) and X_pseudo is not None and y_pseudo is not None and X_pseudo.columns.equals(X.columns): + assert_pseudo_column_match(X=X, X_pseudo=X_pseudo) X_w_pseudo = pd.concat([X, X_pseudo]) y_w_pseudo = pd.concat([y, y_pseudo]) model_fit_kwargs.pop("X_pseudo") @@ -1814,6 +1817,11 @@ def _train_and_save( logger.log(15, f"{len(X_pseudo)} extra rows of pseudolabeled data added to training set for {model.name}") model = self._train_single(X_w_pseudo, y_w_pseudo, model, X_val, y_val, **model_fit_kwargs) else: + if level > 1: + if X_pseudo is not None and y_pseudo is not None: + logger.log(15, f"Dropping pseudo in stacking layer due to missing out-of-fold predictions") + model_fit_kwargs.pop("X_pseudo", None) + model_fit_kwargs.pop("y_pseudo", None) model = self._train_single(X, y, model, X_val, y_val, total_resources=total_resources, **model_fit_kwargs) fit_end_time = time.time() diff --git a/tabular/src/autogluon/tabular/learner/abstract_learner.py b/tabular/src/autogluon/tabular/learner/abstract_learner.py --- a/tabular/src/autogluon/tabular/learner/abstract_learner.py +++ b/tabular/src/autogluon/tabular/learner/abstract_learner.py @@ -446,8 +446,8 @@ def get_inputs_to_stacker(self, dataset=None, model=None, base_models: list = No # Fits _FULL models and links them in the stack so _FULL models only use other _FULL models as input during stacking # If model is specified, will fit all _FULL models that are ancestors of the provided model, automatically linking them. # If no model is specified, all models are refit and linked appropriately. - def refit_ensemble_full(self, model: str | List[str] = "all"): - return self.load_trainer().refit_ensemble_full(model=model) + def refit_ensemble_full(self, model: str | List[str] = "all", **kwargs): + return self.load_trainer().refit_ensemble_full(model=model, **kwargs) def fit_transform_features(self, X, y=None, **kwargs): if self.label in X: diff --git a/tabular/src/autogluon/tabular/predictor/predictor.py b/tabular/src/autogluon/tabular/predictor/predictor.py --- a/tabular/src/autogluon/tabular/predictor/predictor.py +++ b/tabular/src/autogluon/tabular/predictor/predictor.py @@ -1356,7 +1356,10 @@ def _post_fit( calibrate=False, calibrate_decision_threshold=False, infer_limit=None, + refit_full_kwargs: dict = None, ): + if refit_full_kwargs is None: + refit_full_kwargs = {} if not self.model_names(): logger.log(30, "Warning: No models found, skipping post_fit logic...") return @@ -1383,9 +1386,9 @@ def _post_fit( else: _set_best_to_refit_full = False if refit_full == "best": - self.refit_full(model=trainer_model_best, set_best_to_refit_full=_set_best_to_refit_full) + self.refit_full(model=trainer_model_best, set_best_to_refit_full=_set_best_to_refit_full, **refit_full_kwargs) else: - self.refit_full(model=refit_full, set_best_to_refit_full=_set_best_to_refit_full) + self.refit_full(model=refit_full, set_best_to_refit_full=_set_best_to_refit_full, **refit_full_kwargs) if calibrate == "auto": if self.problem_type in PROBLEM_TYPES_CLASSIFICATION and self.eval_metric.needs_proba: @@ -1509,23 +1512,11 @@ def fit_extra( # labeled pseudo data has new labels unseen in the original train. Probably need to refit # data preprocessor if this is the case. 
if pseudo_data is not None: - assert isinstance(pseudo_data, pd.DataFrame) - if self.label not in pseudo_data.columns: - raise ValueError("'pseudo_data' does not contain the labeled column.") - - if self.sample_weight is not None: - raise ValueError("Applying 'sample_weight' while calling 'fit_pseudolabel' is not supported") - - X_pseudo = pseudo_data.drop(columns=[self.label]) - y_pseudo_og = pseudo_data[self.label] - X_pseudo = self._learner.transform_features(X_pseudo) - y_pseudo = self._learner.label_cleaner.transform(y_pseudo_og) - - if np.isnan(y_pseudo.unique()).any(): - raise Exception("NaN was found in the label column for pseudo labeled data." "Please ensure no NaN values in target column") + X_pseudo, y_pseudo, y_pseudo_og = self._sanitize_pseudo_data(pseudo_data=pseudo_data) else: X_pseudo = None y_pseudo = None + y_pseudo_og = None if ag_args is None: ag_args = {} @@ -1547,7 +1538,7 @@ def fit_extra( for key in hyperparameter_keys: if isinstance(key, int): highest_level = max(key, highest_level) - num_stack_levels = highest_level + num_stack_levels = highest_level - 1 # TODO: make core_kwargs a kwargs argument to predictor.fit, add aux_kwargs to predictor.fit core_kwargs = { @@ -1588,7 +1579,7 @@ def fit_extra( base_model_names=base_model_names, time_limit=time_limit, relative_stack=True, - level_end=num_stack_levels, + level_end=num_stack_levels + 1, core_kwargs=core_kwargs, aux_kwargs=aux_kwargs, name_suffix=name_suffix, @@ -1604,12 +1595,18 @@ def fit_extra( time_limit_weighted = None fit_models += self.fit_weighted_ensemble(time_limit=time_limit_weighted) + refit_full_kwargs = dict( + X_pseudo=X_pseudo, + y_pseudo=y_pseudo, + ) + self._post_fit( keep_only_best=kwargs["keep_only_best"], refit_full=kwargs["refit_full"], set_best_to_refit_full=kwargs["set_best_to_refit_full"], save_space=kwargs["save_space"], calibrate=kwargs["calibrate"], + refit_full_kwargs=refit_full_kwargs, ) self.save() return self @@ -1641,6 +1638,23 @@ def _fit_weighted_ensemble_pseudo(self): else: logger.log(15, "Weighted ensemble was not the best model for current iteration of pseudo labeling") + def _predict_pseudo(self, X_test: pd.DataFrame, use_ensemble: bool): + if use_ensemble: + if self.problem_type in PROBLEM_TYPES_CLASSIFICATION: + test_pseudo_idxes_true, y_pred_proba, y_pred = filter_ensemble_pseudo(predictor=self, unlabeled_data=X_test) + else: + test_pseudo_idxes_true, y_pred = filter_ensemble_pseudo(predictor=self, unlabeled_data=X_test) + y_pred_proba = y_pred.copy() + else: + if self.can_predict_proba: + y_pred_proba = self.predict_proba(data=X_test, as_multiclass=True) + y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type) + else: + y_pred = self.predict(data=X_test) + y_pred_proba = y_pred + test_pseudo_idxes_true = filter_pseudo(y_pred_proba_og=y_pred_proba, problem_type=self.problem_type) + return y_pred, y_pred_proba, test_pseudo_idxes_true + def _run_pseudolabeling( self, unlabeled_data: pd.DataFrame, @@ -1682,7 +1696,7 @@ def _run_pseudolabeling( -------- self: TabularPredictor """ - previous_score = self.info()["best_model_score_val"] + previous_score = self.leaderboard(set_refit_score_to_parent=True).set_index("model", drop=True).loc[self.model_best]["score_val"] y_pseudo_og = pd.Series() y_pred_proba_og = None if return_pred_prob: @@ -1696,44 +1710,25 @@ def _run_pseudolabeling( if len(X_test) == 0: logger.log(20, f"No more unlabeled data to pseudolabel. 
Done with pseudolabeling...") break + if i == 0: + y_pred, y_pred_proba, test_pseudo_idxes_true = self._predict_pseudo(X_test=X_test, use_ensemble=use_ensemble) + y_pred_proba_og = y_pred_proba iter_print = str(i + 1) - logger.log(20, f"Beginning iteration {iter_print} of pseudolabeling out of max: {max_iter}") - - if use_ensemble: - if self.problem_type in PROBLEM_TYPES_CLASSIFICATION: - test_pseudo_idxes_true, y_pred_proba, y_pred = filter_ensemble_pseudo(predictor=self, unlabeled_data=X_test) - else: - test_pseudo_idxes_true, y_pred = filter_ensemble_pseudo(predictor=self, unlabeled_data=X_test) - y_pred_proba = y_pred.copy() - else: - if self.can_predict_proba: - y_pred_proba = self.predict_proba(data=X_test, as_multiclass=True) - y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type) - else: - y_pred = self.predict(data=X_test) - y_pred_proba = y_pred - test_pseudo_idxes_true = filter_pseudo(y_pred_proba_og=y_pred_proba, problem_type=self.problem_type) - - if return_pred_prob: - if i == 0: - y_pred_proba_og = y_pred_proba - else: - y_pred_proba_og.loc[test_pseudo_idxes_true.index] = y_pred_proba.loc[test_pseudo_idxes_true.index] + logger.log(20, f"Beginning iteration {iter_print} of pseudolabeling out of max {max_iter}") if len(test_pseudo_idxes_true) < 1: - logger.log( - 20, f"Could not confidently assign pseudolabels for any of the provided rows in iteration: {iter_print}. Done with pseudolabeling..." - ) + logger.log(20, f"Could not confidently assign pseudolabels for any of the provided rows in iteration {iter_print}. Done with pseudolabeling...") break else: logger.log( 20, - f"Pseudolabeling algorithm confidently assigned pseudolabels to: {len(test_pseudo_idxes_true)} rows of data" - f"on iteration: {iter_print}. Adding to train data", + f"Pseudolabeling algorithm confidently assigned pseudolabels to {len(test_pseudo_idxes_true)} rows of data " + f"on iteration {iter_print}. 
Adding to train data", ) test_pseudo_idxes = pd.Series(data=False, index=y_pred_proba.index) + test_pseudo_idxes_false = test_pseudo_idxes[~test_pseudo_idxes.index.isin(test_pseudo_idxes_true.index)] test_pseudo_idxes[test_pseudo_idxes_true.index] = True y_pseudo_og = pd.concat([y_pseudo_og, y_pred.loc[test_pseudo_idxes_true.index]], verify_integrity=True) @@ -1745,7 +1740,7 @@ def _run_pseudolabeling( if fit_ensemble and fit_ensemble_every_iter: self._fit_weighted_ensemble_pseudo() - current_score = self.info()["best_model_score_val"] + current_score = self.leaderboard(set_refit_score_to_parent=True).set_index("model", drop=True).loc[self.model_best]["score_val"] logger.log( 20, f"Pseudolabeling algorithm changed validation score from: {previous_score}, to: {current_score}" @@ -1753,28 +1748,40 @@ def _run_pseudolabeling( ) if previous_score >= current_score: + # No improvement from pseudo labelling this iteration, stop iterating break else: # Cut down X_test to not include pseudo labeled data X_test = X_test.loc[test_pseudo_idxes[~test_pseudo_idxes].index] previous_score = current_score + # Update y_pred_proba and test_pseudo_idxes_true based on the latest pseudolabelled iteration + y_pred, y_pred_proba, test_pseudo_idxes_true = self._predict_pseudo(X_test=X_test, use_ensemble=use_ensemble) + # Update the y_pred_proba_og variable if an improvement was achieved + if return_pred_prob and test_pseudo_idxes_false is not None: + y_pred_proba_og.loc[test_pseudo_idxes_false.index] = y_pred_proba.loc[test_pseudo_idxes_false.index] + if fit_ensemble and not fit_ensemble_every_iter: self._fit_weighted_ensemble_pseudo() - if self.can_predict_proba: - y_pred_proba_og = self.predict_proba(unlabeled_data) - else: - y_pred_proba_og = self.predict(unlabeled_data) + if return_pred_prob: + if self.can_predict_proba: + y_pred_proba_og = self.predict_proba(unlabeled_data) + else: + y_pred_proba_og = self.predict(unlabeled_data) if return_pred_prob: return self, y_pred_proba_og else: return self + # TODO: `fit_ensemble` and `use_ensemble` seem redundant, and don't use calibration, making them worse than when they are disabled. + # TODO: Supporting L2+ models is very complicated. It requires predicting with the original models via `predictor.predict_proba_multi` on `pseudo_data`, + # then keeping track of these pred_proba and passing them to the appropriate models at fit time. + @apply_presets(tabular_presets_dict, tabular_presets_alias) def fit_pseudolabel( self, pseudo_data: pd.DataFrame, - max_iter: int = 5, + max_iter: int = 3, return_pred_prob: bool = False, use_ensemble: bool = False, fit_ensemble: bool = False, @@ -1782,21 +1789,53 @@ def fit_pseudolabel( **kwargs, ): """ - If 'pseudo_data' is labeled then incorporates all test_data into train_data for - newly fit models. If 'pseudo_data' is unlabeled then 'fit_pseudolabel' will self label the - data and will augment the original training data by adding all the self labeled - data that meets a criteria (For example all rows with predictive prob above 95%). If - predictor is fit then will call fit_extra with added training data, if predictor - is not fit then will fit model on train_data then run. + [Advanced] Uses additional data (`pseudo_data`) to try to achieve better model quality. + Pseudo data can come either with or without the `label` column. + + If `pseudo_data` is labeled, then models will be refit using the `pseudo_data` as additional training data. 
+ If bagging, each fold of the bagged ensemble will use all the `pseudo_data` as additional training data. + `pseudo_data` will never be used for validation/scoring. + + If the data is unlabeled, such as providing the batched test data without ground truth available, then transductive learning is leveraged. + In transductive learning, the existing predictor will predict on `pseudo_data` + to identify the most confident rows (For example all rows with predictive probability above 95%). + These rows will then be pseudo-labelled, given the label of the most confident class. + The pseudo-labelled rows will then be used as additional training data when fitting the models. + Then, if `max_iter > 1`, this process can repeat itself, using the new models to predict on the unused `pseudo_data` rows + to see if any new rows should be used in the next iteration as training data. + We recommend specifying `return_pred_prob=True` if the data is unlabeled to get the correct prediction probabilities on the `pseudo_data`, + rather than calling `predictor.predict_proba(pseudo_data)`. + + For example: + Original fit: 10000 `train_data` rows with 10-fold bagging + Bagged fold models will use 9000 `train_data` rows for training, and 1000 for validation. + `fit_pseudolabel` is called with 5000 row labelled `pseudo_data`. + Bagged fold models are then fit again with `_PSEUDO` suffix. + 10000 train_data rows with 10-fold bagging + 5000 `pseudo_data` rows. + Bagged fold models will use 9000 `train_data` rows + 5000 `pseudo_data` rows = 14000 rows for training, and 1000 for validation. + Note: The same validation rows will be used as was done in the original fit, so that validation scores are directly comparable. + Alternatively, `fit_pseduolabel` is called with 5000 rows unlabelled `pseudo_data`. + Predictor predicts on the `pseudo_data`, finds 965 rows with confident predictions. + Set the ground truth of those 965 rows as the most confident prediction. + Bagged fold models are then fit with `_PSEUDO` suffix. + 10000 train_data rows with 10-fold bagging + 965 labelled `pseudo_data` rows. + Bagged fold models will use 9000 `train_data` rows + 965 `pseudo_data` rows = 9965 rows for training, and 1000 for validation. + Note: The same validation rows will be used as was done in the original fit, so that validation scores are directly comparable. + Repeat the process using the new pseudo-labelled predictor on the remaining `pseudo_data`. + In the example, lets assume 188 new `pseudo_data` rows have confident predictions. + Now the total labelled `pseudo_data` rows is 965 + 188 = 1153. + Then repeat the process, up to `max_iter` times: ex 10000 train_data rows with 10-fold bagging + 1153 `pseudo_data` rows. + Early stopping will trigger if validation score improvement is not observed. + + Note: pseudo_data is only used for L1 models. Support for L2+ models is not yet implemented. L2+ models will only use the original train_data. Parameters ---------- pseudo_data : str or :class:`TabularDataset` or :class:`pd.DataFrame` Extra data to incorporate into training. Pre-labeled test data allowed. 
If no labels - then pseudolabeling algorithm will predict and filter out which rows to incorporate into - training - max_iter: int, default = 5 - Maximum iterations of pseudolabeling allowed + then pseudo-labeling algorithm will predict and filter out which rows to incorporate into training + max_iter: int, default = 3 + Maximum iterations of pseudo-labeling allowed return_pred_prob: bool, default = False Returns held-out predictive probabilities from pseudo-labeling. If test_data is labeled then returns model's predictive probabilities. @@ -1806,12 +1845,12 @@ def fit_pseudolabel( fit_ensemble: bool, default = False If True with fit weighted ensemble model using combination of best models. Fitting weighted ensemble will be done after fitting has - being completed unless otherwise specified. If False will not fit weighted ensemble + been completed unless otherwise specified. If False will not fit weighted ensemble over models trained with pseudo labeling and models trained without it. fit_ensemble_every_iter: bool, default = False If True fits weighted ensemble model for every iteration of pseudo labeling algorithm. If False and fit_ensemble is True will fit after all pseudo labeling training is done. - kwargs: dict + **kwargs: If predictor is not already fit, then kwargs are for the functions 'fit' and 'fit_extra': Refer to parameters documentation in :meth:`TabularPredictor.fit`. Refer to parameters documentation in :meth:`TabularPredictor.fit_extra`. @@ -1828,7 +1867,8 @@ def fit_pseudolabel( self._validate_unique_indices(pseudo_data, "pseudo_data") - if not self.is_fit: + was_fit = self.is_fit + if not was_fit: if "train_data" not in kwargs.keys(): Exception( "Autogluon is required to be fit or given 'train_data' in order to run 'fit_pseudolabel'." @@ -1857,6 +1897,14 @@ def fit_pseudolabel( kwargs["hyperparameters"] = hyperparameters fit_extra_args = self._get_all_fit_extra_args() fit_extra_kwargs = {key: value for key, value in kwargs.items() if key in fit_extra_args} + + # If first fit was in this method call and `num_stack_levels` wasn't specified, reuse the number of stack levels used in the first fit. + # TODO: Consider making calculating this information easier, such as keeping track of meta-info from the latest/original fit call. + # Currently we use `stack_name == core` to figure out the number of stack levels, but this is somewhat brittle. + if "num_stack_levels" not in fit_extra_kwargs and not was_fit: + models_core: List[str] = [m for m, stack_name in self._trainer.get_models_attribute_dict(attribute="stack_name").items() if stack_name == "core"] + num_stack_levels = max(self._trainer.get_models_attribute_dict(attribute="level", models=models_core).values()) - 1 + fit_extra_kwargs["num_stack_levels"] = num_stack_levels if is_labeled: logger.log(20, "Fitting predictor using the provided pseudolabeled examples as extra training data...") self.fit_extra(pseudo_data=pseudo_data, name_suffix=PSEUDO_MODEL_SUFFIX.format(iter="")[:-1], **fit_extra_kwargs) @@ -3012,7 +3060,15 @@ def unpersist(self, models="all") -> List[str]: self._assert_is_fit("unpersist") return self._learner.load_trainer().unpersist(model_names=models) - def refit_full(self, model: str | List[str] = "all", set_best_to_refit_full: bool = True) -> Dict[str, str]: + # TODO: `total_resources = None` during refit, fix this. + # refit_full doesn't account for user-specified resources at fit time, nor does it allow them to specify for refit. 
+ def refit_full( + self, + model: str | List[str] = "all", + set_best_to_refit_full: bool = True, + train_data_extra: pd.DataFrame = None, + **kwargs, + ) -> Dict[str, str]: """ Retrain model on all of the data (training + validation). For bagged models: @@ -3047,6 +3103,11 @@ def refit_full(self, model: str | List[str] = "all", set_best_to_refit_full: boo This means the model used when `predictor.predict(data)` is called will be the refit_full version instead of the original version of the model. Ignored if `model` is not the best model. If str, interprets as a model name and sets best model to the refit_full version of the model `set_best_to_refit_full`. + train_data_extra : pd.DataFrame, default = None + If specified, will be used as additional rows of training data when refitting models. + Requires label column. Will only be used for L1 models. + **kwargs + [Advanced] Developer debugging arguments. Returns ------- @@ -3064,7 +3125,13 @@ def refit_full(self, model: str | List[str] = "all", set_best_to_refit_full: boo "\tThis process is not bound by time_limit, but should take less time than the original `predictor.fit` call.\n" '\tTo learn more, refer to the `.refit_full` method docstring which explains how "_FULL" models differ from normal models.', ) - refit_full_dict = self._learner.refit_ensemble_full(model=model) + if train_data_extra is not None: + assert kwargs.get("X_pseudo", None) is None, f"Cannot pass both train_data_extra and X_pseudo arguments" + assert kwargs.get("y_pseudo", None) is None, f"Cannot pass both train_data_extra and y_pseudo arguments" + X_pseudo, y_pseudo, _ = self._sanitize_pseudo_data(pseudo_data=train_data_extra, name="train_data_extra") + kwargs["X_pseudo"] = X_pseudo + kwargs["y_pseudo"] = y_pseudo + refit_full_dict = self._learner.refit_ensemble_full(model=model, **kwargs) if set_best_to_refit_full: if isinstance(set_best_to_refit_full, str): @@ -3076,7 +3143,7 @@ def refit_full(self, model: str | List[str] = "all", set_best_to_refit_full: boo self._trainer.model_best = model_refit_map[model_to_set_best] # Note: model_best will be overwritten if additional training is done with new models, # since model_best will have validation score of None and any new model will have a better validation score. - # This has the side-effect of having the possibility of model_best being overwritten by a worse model than the original model_best. + # This has the side effect of having the possibility of model_best being overwritten by a worse model than the original model_best. self._trainer.save() logger.log( 20, @@ -4734,6 +4801,23 @@ def _check_if_hyperparameters_handle_text(hyperparameters: dict) -> bool: else: return False + def _sanitize_pseudo_data(self, pseudo_data: pd.DataFrame, name="pseudo_data") -> Tuple[pd.DataFrame, pd.Series, pd.Series]: + assert isinstance(pseudo_data, pd.DataFrame) + if self.label not in pseudo_data.columns: + raise ValueError(f"'{name}' does not contain the labeled column.") + + if self.sample_weight is not None: + raise ValueError(f"Applying 'sample_weight' with {name} is not supported.") + + X_pseudo = pseudo_data.drop(columns=[self.label]) + y_pseudo_og = pseudo_data[self.label] + X_pseudo = self._learner.transform_features(X_pseudo) + y_pseudo = self._learner.label_cleaner.transform(y_pseudo_og) + + if np.isnan(y_pseudo.unique()).any(): + raise Exception(f"NaN was found in the label column for {name}." 
"Please ensure no NaN values in target column") + return X_pseudo, y_pseudo, y_pseudo_og + def _assert_is_fit(self, message_suffix: str = None): if not self.is_fit: error_message = "Predictor is not fit. Call `.fit` before calling"
diff --git a/tabular/tests/unittests/test_tabular.py b/tabular/tests/unittests/test_tabular.py --- a/tabular/tests/unittests/test_tabular.py +++ b/tabular/tests/unittests/test_tabular.py @@ -306,7 +306,18 @@ def test_advanced_functionality(): predictor.delete_models(models_to_keep=[]) # Test that dry-run doesn't delete models assert len(predictor.model_names()) == num_models * 2 predictor.predict(data=test_data) - predictor.delete_models(models_to_keep=[], dry_run=False) # Test that dry-run deletes models + + # Test refit_full with train_data_extra argument + refit_full_models = list(predictor.model_refit_map().values()) + predictor.delete_models(models_to_delete=refit_full_models, dry_run=False) + assert len(predictor.model_names()) == num_models + assert predictor.model_refit_map() == dict() + predictor.refit_full(train_data_extra=test_data) # train_data_extra argument + assert len(predictor.model_names()) == num_models * 2 + assert len(predictor.model_refit_map()) == num_models + predictor.predict(data=test_data) + + predictor.delete_models(models_to_keep=[], dry_run=False) # Test that dry_run=False deletes models assert len(predictor.model_names()) == 0 assert len(predictor.leaderboard()) == 0 assert len(predictor.leaderboard(extra_info=True)) == 0
[BUG] Document fit_pseudolabel() and fix compatibility with refit_full()

**Bug Report Checklist**

<!-- Please ensure at least one of the following to help the developers troubleshoot the problem: -->

- [x] I provided code that demonstrates a minimal reproducible example. <!-- Ideal, especially via source install -->
- [ ] I confirmed bug exists on the latest mainline of AutoGluon via source install. <!-- Preferred -->
- [x] I confirmed bug exists on the latest stable version of AutoGluon. <!-- Unnecessary if prior items are checked -->

**Describe the bug**
<!-- A clear and concise description of what the bug is. -->

Two issues regarding [fit_pseudolabel](https://auto.gluon.ai/stable/api/autogluon.tabular.TabularPredictor.fit_pseudolabel.html):

- Could you please add at least a couple of sentences sketching out the basic approach that AG Tabular takes when leveraging pseudolabeled data in [the API documentation](https://auto.gluon.ai/stable/api/autogluon.tabular.TabularPredictor.fit_pseudolabel.html)? For instance, how is train_data used and how is pseudo_data used?
- Secondly, there appears to be a bug in the interaction with refit_full(). This doesn't generate an error, but my accuracy metrics show that it completely loses the benefit of the pseudolabels.

Attached please find three scripts ([bug_report.zip](https://github.com/autogluon/autogluon/files/14118032/bug_report.zip)) which I have run against the dataset "%SC%ProductSafety%RegulatoryEngagement%US_Multiple_BathroomShelves", which the AutoGluon team has access to:

1. minimal_demo_500_labels_best_quality_no_pseudo.ipynb shows baseline performance with 500 ground-truth labels:
```
log_loss:0.18833911123873076
roc_auc_score:0.7489790735740716
AUPRC:0.1754345219105115
```
2. minimal_demo_500_labels_best_quality_with_pseudo.ipynb shows the same with the addition of 8824 pseudo-labeled rows into which we have introduced a 2% error rate. With the addition of this data, we now have the following improved metrics:
```
log_loss:0.1609988051772629
roc_auc_score:0.8246199979999895
AUPRC:0.3470127101871933
```
3. On the other hand, minimal_demo_500_labels_best_quality_with_pseudo_with_separate_refit_full.ipynb shows us first using fit_pseudolabel as above and getting the same level of accuracy. We then call `predictor.refit_full()` and the benefits of the pseudo-labeled data completely disappear. In fact, the metrics are worse than the baseline:
```
log_loss:0.19120844475255883
roc_auc_score:0.7376429612787436
AUPRC:0.164376187655637
```
Thanks for submitting! We will track and prioritize a fix and doc improvements.
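The fix adds a `train_data_extra` argument to `refit_full()` (see the patch and test above), so the pseudo-labeled rows are no longer dropped when refitting. A sketch, where `pseudo_df` is a hypothetical labeled DataFrame with the same columns as the original `train_data` (per the patch, these extra rows are only used for L1 models):

```python
# Keep the benefit of the pseudo-labeled rows through the refit.
predictor.fit_pseudolabel(pseudo_data=pseudo_df)
predictor.refit_full(train_data_extra=pseudo_df)
predictions = predictor.predict(test_df)
```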
2024-02-16T22:19:59Z
[]
[]
autogluon/autogluon
3,937
autogluon__autogluon-3937
[ "3853" ]
97a724302cdd62842cbbb5aa840e209b9686aeed
diff --git a/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py b/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py --- a/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +++ b/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py @@ -393,6 +393,7 @@ def _fit( self._check_fit_params() # update auxiliary parameters init_args = self._get_estimator_init_args() + keep_lightning_logs = init_args.pop("keep_lightning_logs", False) callbacks = self._get_callbacks( time_limit=time_limit, early_stopping_patience=None if val_data is None else init_args["early_stopping_patience"], @@ -411,7 +412,7 @@ def _fit( self.gts_predictor.batch_size = init_args["predict_batch_size"] lightning_logs_dir = Path(self.path) / "lightning_logs" - if lightning_logs_dir.exists() and lightning_logs_dir.is_dir(): + if not keep_lightning_logs and lightning_logs_dir.exists() and lightning_logs_dir.is_dir(): logger.debug(f"Removing lightning_logs directory {lightning_logs_dir}") shutil.rmtree(lightning_logs_dir) diff --git a/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py b/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py --- a/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py +++ b/timeseries/src/autogluon/timeseries/models/gluonts/torch/models.py @@ -1,6 +1,7 @@ """ Module including wrappers for PyTorch implementations of models in GluonTS """ + import logging from typing import Any, Dict, Type @@ -78,6 +79,8 @@ class DeepARModel(AbstractGluonTSModel): Optional keyword arguments passed to ``lightning.Trainer``. early_stopping_patience : int or None, default = 20 Early stop training if the validation loss doesn't improve for this many epochs. + keep_lightning_logs : bool, default = False + If True, ``lightning_logs`` directory will NOT be removed after the model finished training. """ supports_known_covariates = True @@ -131,6 +134,8 @@ class SimpleFeedForwardModel(AbstractGluonTSModel): Optional keyword arguments passed to ``lightning.Trainer``. early_stopping_patience : int or None, default = 20 Early stop training if the validation loss doesn't improve for this many epochs. + keep_lightning_logs : bool, default = False + If True, ``lightning_logs`` directory will NOT be removed after the model finished training. """ def _get_estimator_class(self) -> Type[GluonTSEstimator]: @@ -188,6 +193,8 @@ class TemporalFusionTransformerModel(AbstractGluonTSModel): Optional keyword arguments passed to ``lightning.Trainer``. early_stopping_patience : int or None, default = 20 Early stop training if the validation loss doesn't improve for this many epochs. + keep_lightning_logs : bool, default = False + If True, ``lightning_logs`` directory will NOT be removed after the model finished training. """ supports_known_covariates = True @@ -254,6 +261,8 @@ class DLinearModel(AbstractGluonTSModel): Early stop training if the validation loss doesn't improve for this many epochs. weight_decay : float, default = 1e-8 Weight decay regularization parameter. + keep_lightning_logs : bool, default = False + If True, ``lightning_logs`` directory will NOT be removed after the model finished training. """ @property @@ -306,6 +315,8 @@ class PatchTSTModel(AbstractGluonTSModel): Learning rate used during training weight_decay : float, default = 1e-8 Weight decay regularization parameter. 
+ keep_lightning_logs : bool, default = False + If True, ``lightning_logs`` directory will NOT be removed after the model finished training. """ @property @@ -377,6 +388,8 @@ class WaveNetModel(AbstractGluonTSModel): Early stop training if the validation loss doesn't improve for this many epochs. weight_decay : float, default = 1e-8 Weight decay regularization parameter. + keep_lightning_logs : bool, default = False + If True, ``lightning_logs`` directory will NOT be removed after the model finished training. """ supports_known_covariates = True
diff --git a/timeseries/tests/unittests/models/test_gluonts.py b/timeseries/tests/unittests/models/test_gluonts.py --- a/timeseries/tests/unittests/models/test_gluonts.py +++ b/timeseries/tests/unittests/models/test_gluonts.py @@ -1,3 +1,4 @@ +from pathlib import Path from unittest import mock import numpy as np @@ -309,3 +310,22 @@ def test_when_custom_trainer_kwargs_given_then_trainer_receives_them(): received_trainer_kwargs = catch_trainer_kwargs(model) for k, v in trainer_kwargs.items(): assert received_trainer_kwargs[k] == v + + +def test_when_model_finishes_training_then_logs_are_removed(temp_model_path): + model = TemporalFusionTransformerModel( + freq=DUMMY_TS_DATAFRAME.freq, path=temp_model_path, hyperparameters=DUMMY_HYPERPARAMETERS + ) + model.fit(train_data=DUMMY_TS_DATAFRAME) + assert not (Path(model.path) / "lightning_logs").exists() + + [email protected]("keep_lightning_logs", [True, False]) +def test_when_keep_lightning_logs_set_then_logs_are_not_removed(keep_lightning_logs, temp_model_path): + model = DeepARModel( + freq=DUMMY_TS_DATAFRAME.freq, + path=temp_model_path, + hyperparameters={"keep_lightning_logs": keep_lightning_logs, **DUMMY_HYPERPARAMETERS}, + ) + model.fit(train_data=DUMMY_TS_DATAFRAME) + assert (Path(model.path) / "lightning_logs").exists() == keep_lightning_logs
Feature Request: Make Deletion of lightning_logs Directory Optional in TimeSeries Models

## Description

The current implementation of AutoGluon's time series models, specifically those using the GluonTS torch backend, automatically deletes the `lightning_logs` directory after each training run. This directory contains logs that are essential for users who utilize TensorBoard to monitor and compare different training sessions. The automatic deletion of these logs makes it difficult to use TensorBoard effectively, as it relies on historical log data for comparison.

## Proposal

I propose a feature for the `timeseries` module where the deletion of the `lightning_logs` directory is made optional. This could be implemented by adding a parameter to the model training functions and the `fit()` method, allowing users to choose whether to preserve the logs. By default, this parameter could be `True` to maintain the current behavior, but when set to `False`, it would keep the logs intact for further analysis with TensorBoard.

## Code

This code is responsible for the deletion of the logs:

```python
if lightning_logs_dir.exists() and lightning_logs_dir.is_dir():
    logger.debug(f"Removing lightning_logs directory {lightning_logs_dir}")
    shutil.rmtree(lightning_logs_dir)
```
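The change that landed implements this as a per-model `keep_lightning_logs` hyperparameter (default `False`, which preserves the current delete-after-training behavior). A usage sketch follows, where `train_data`, the target column, and the prediction length are illustrative:

```python
from autogluon.timeseries import TimeSeriesPredictor

predictor = TimeSeriesPredictor(target="target", prediction_length=24)
predictor.fit(
    train_data,
    hyperparameters={
        # Keep the lightning_logs directory for TensorBoard after training.
        "DeepAR": {"keep_lightning_logs": True},
    },
)
```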
2024-02-20T10:36:50Z
[]
[]
autogluon/autogluon
3,977
autogluon__autogluon-3977
[ "3946" ]
c701aef777e536478c615d5c493971d58808dbab
diff --git a/core/src/autogluon/core/calibrate/_decision_threshold.py b/core/src/autogluon/core/calibrate/_decision_threshold.py --- a/core/src/autogluon/core/calibrate/_decision_threshold.py +++ b/core/src/autogluon/core/calibrate/_decision_threshold.py @@ -4,6 +4,7 @@ from typing import Callable, List, Union import numpy as np +import pandas as pd from ..constants import BINARY from ..metrics import Scorer @@ -20,13 +21,29 @@ def calibrate_decision_threshold( metric: Union[Callable, Scorer], metric_kwargs: dict | None = None, decision_thresholds: Union[int, List[float]] = 50, + subsample_size: int | None = None, + seed: int = 0, metric_name: str | None = None, verbose: bool = True, ) -> float: problem_type = BINARY + + if isinstance(y, pd.Series): + y = y.values + if isinstance(y_pred_proba, pd.Series): + y_pred_proba = y_pred_proba.values assert len(y_pred_proba.shape) == 1 assert len(y.shape) == 1 - assert len(y) == len(y_pred_proba) + + num_samples_total = len(y) + assert num_samples_total == len(y_pred_proba) + + if subsample_size is not None and subsample_size < num_samples_total: + logger.log(20, f"Subsampling y to {subsample_size} samples to speedup threshold calibration...") + rng = np.random.default_rng(seed=seed) + subsample_indices = rng.choice(num_samples_total, subsample_size, replace=False) + y = y[subsample_indices] + y_pred_proba = y_pred_proba[subsample_indices] if metric_kwargs is None: metric_kwargs = dict() diff --git a/core/src/autogluon/core/models/greedy_ensemble/ensemble_selection.py b/core/src/autogluon/core/models/greedy_ensemble/ensemble_selection.py --- a/core/src/autogluon/core/models/greedy_ensemble/ensemble_selection.py +++ b/core/src/autogluon/core/models/greedy_ensemble/ensemble_selection.py @@ -1,8 +1,12 @@ +from __future__ import annotations + import logging import time from collections import Counter +from typing import List import numpy as np +import pandas as pd from ...constants import PROBLEM_TYPES from ...metrics import log_loss @@ -35,6 +39,7 @@ def __init__( sorted_initialization: bool = False, bagging: bool = False, tie_breaker: str = "random", + subsample_size: int | None = None, random_state: np.random.RandomState = None, **kwargs, ): @@ -47,13 +52,14 @@ def __init__( if tie_breaker not in ["random", "second_metric"]: raise ValueError(f"Unknown tie_breaker value: {tie_breaker}. Must be one of: ['random', 'second_metric']") self.tie_breaker = tie_breaker + self.subsample_size = subsample_size if random_state is not None: self.random_state = random_state else: self.random_state = np.random.RandomState(seed=0) self.quantile_levels = kwargs.get("quantile_levels", None) - def fit(self, predictions, labels, time_limit=None, identifiers=None, sample_weight=None): + def fit(self, predictions: List[np.ndarray], labels: np.ndarray, time_limit=None, identifiers=None, sample_weight=None): self.ensemble_size = int(self.ensemble_size) if self.ensemble_size < 1: raise ValueError("Ensemble size cannot be less than one!") @@ -69,13 +75,23 @@ def fit(self, predictions, labels, time_limit=None, identifiers=None, sample_wei return self # TODO: Consider having a removal stage, remove each model and see if score is affected, if improves or not effected, remove it. 
- def _fit(self, predictions, labels, time_limit=None, sample_weight=None): + def _fit(self, predictions: List[np.ndarray], labels: np.ndarray, time_limit=None, sample_weight=None): ensemble_size = self.ensemble_size + if isinstance(labels, pd.Series): + labels = labels.values self.num_input_models_ = len(predictions) ensemble = [] trajectory = [] order = [] used_models = set() + num_samples_total = len(labels) + + if self.subsample_size is not None and self.subsample_size < num_samples_total: + logger.log(15, f"Subsampling to {self.subsample_size} samples to speedup ensemble selection...") + subsample_indices = self.random_state.choice(num_samples_total, self.subsample_size, replace=False) + labels = labels[subsample_indices] + for i in range(self.num_input_models_): + predictions[i] = predictions[i][subsample_indices] # if self.sorted_initialization: # n_best = 20 @@ -190,7 +206,7 @@ def _fit(self, predictions, labels, time_limit=None, sample_weight=None): logger.debug("Ensemble indices: " + str(self.indices_)) - def _calculate_regret(self, y_true, y_pred_proba, metric, sample_weight=None): + def _calculate_regret(self, y_true: np.ndarray, y_pred_proba: np.ndarray, metric, sample_weight=None): if metric.needs_pred or metric.needs_quantile: preds = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=self.problem_type) else: diff --git a/core/src/autogluon/core/models/greedy_ensemble/greedy_weighted_ensemble_model.py b/core/src/autogluon/core/models/greedy_ensemble/greedy_weighted_ensemble_model.py --- a/core/src/autogluon/core/models/greedy_ensemble/greedy_weighted_ensemble_model.py +++ b/core/src/autogluon/core/models/greedy_ensemble/greedy_weighted_ensemble_model.py @@ -18,7 +18,10 @@ def __init__(self, base_model_names=None, model_base=EnsembleSelection, **kwargs self.weights_ = None def _set_default_params(self): - default_params = {"ensemble_size": 100} + default_params = { + "ensemble_size": 25, # TabRepo reports that values above 25 lead to no measurable improvement. + "subsample_size": 1000000, # subsample to this many rows if training row count exceeds this value to speed up training + } for param, val in default_params.items(): self._set_default_param_value(param, val) diff --git a/core/src/autogluon/core/trainer/abstract_trainer.py b/core/src/autogluon/core/trainer/abstract_trainer.py --- a/core/src/autogluon/core/trainer/abstract_trainer.py +++ b/core/src/autogluon/core/trainer/abstract_trainer.py @@ -1290,15 +1290,7 @@ def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled= else: can_refit_full = model._get_tags().get("can_refit_full", False) reuse_first_fold = not can_refit_full - if reuse_first_fold: - # Perform fallback black-box refit logic that doesn't retrain. - model_full = model.convert_to_refit_full_via_copy() - # FIXME: validation time not correct for infer 1 batch time, needed to hack _is_refit=True to fix - logger.log(20, f"Fitting model: {model_full.name} | Skipping fit via cloning parent ...") - self._add_model(model_full, stack_name=REFIT_FULL_NAME, level=level, _is_refit=True) - self.save_model(model_full) - models_trained = [model_full.name] - else: + if not reuse_first_fold: model_full = model.convert_to_refit_full_template() # Mitigates situation where bagged models barely had enough memory and refit requires more. Worst case results in OOM, but this lowers chance of failure. 
model_full._user_params_aux["max_memory_usage_ratio"] = model.params_aux["max_memory_usage_ratio"] * 1.15 @@ -1326,6 +1318,30 @@ def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled= refit_full=True, **kwargs, ) + if len(models_trained) == 0: + reuse_first_fold = True + logger.log( + 30, + f"WARNING: Refit training failure detected for '{model_name}'... " + f"Falling back to using first fold to avoid downstream exception." + f"\n\tThis is likely due to an out-of-memory error or other memory related issue. " + f"\n\tPlease create a GitHub issue if this was triggered from a non-memory related problem.", + ) + if not model.params.get("save_bag_folds", True): + raise AssertionError( + f"Cannot avoid training failure during refit for '{model_name}' by falling back to " + f"copying the first fold because it does not exist! (save_bag_folds=False)" + f"\n\tPlease specify `save_bag_folds=True` in the `.fit` call to avoid this exception." + ) + + if reuse_first_fold: + # Perform fallback black-box refit logic that doesn't retrain. + model_full = model.convert_to_refit_full_via_copy() + # FIXME: validation time not correct for infer 1 batch time, needed to hack _is_refit=True to fix + logger.log(20, f"Fitting model: {model_full.name} | Skipping fit via cloning parent ...") + self._add_model(model_full, stack_name=REFIT_FULL_NAME, level=level, _is_refit=True) + self.save_model(model_full) + models_trained = [model_full.name] if len(models_trained) == 1: model_refit_map[model_name] = models_trained[0] for model_trained in models_trained: @@ -3759,6 +3775,7 @@ def calibrate_decision_threshold( weights=None, decision_thresholds: int | List[float] = 50, verbose: bool = True, + **kwargs, ) -> float: # TODO: Docstring assert self.problem_type == BINARY, f'calibrate_decision_threshold is only available for `problem_type="{BINARY}"`' @@ -3817,4 +3834,5 @@ def calibrate_decision_threshold( decision_thresholds=decision_thresholds, metric_name=metric.name, verbose=verbose, + **kwargs, ) diff --git a/core/src/autogluon/core/utils/utils.py b/core/src/autogluon/core/utils/utils.py --- a/core/src/autogluon/core/utils/utils.py +++ b/core/src/autogluon/core/utils/utils.py @@ -290,9 +290,9 @@ def get_pred_from_proba(y_pred_proba: np.ndarray, problem_type: str = BINARY, de if len(y_pred_proba.shape) == 2: assert y_pred_proba.shape[1] == 2 # Assume positive class is in 2nd position - y_pred = [1 if pred > decision_threshold else 0 for pred in y_pred_proba[:, 1]] + y_pred = (y_pred_proba[:, 1] > decision_threshold).astype(int) else: - y_pred = [1 if pred > decision_threshold else 0 for pred in y_pred_proba] + y_pred = (y_pred_proba > decision_threshold).astype(int) elif problem_type == REGRESSION: y_pred = y_pred_proba elif problem_type == QUANTILE: diff --git a/tabular/src/autogluon/tabular/configs/presets_configs.py b/tabular/src/autogluon/tabular/configs/presets_configs.py --- a/tabular/src/autogluon/tabular/configs/presets_configs.py +++ b/tabular/src/autogluon/tabular/configs/presets_configs.py @@ -5,7 +5,7 @@ # Aliases: best best_quality={ "auto_stack": True, - "dynamic_stacking": True, + "dynamic_stacking": "auto", "num_bag_sets": 1, "hyperparameters": "zeroshot", "time_limit": 3600, @@ -15,26 +15,26 @@ # Aliases: high high_quality={ "auto_stack": True, - "dynamic_stacking": True, + "dynamic_stacking": "auto", "num_bag_sets": 1, "hyperparameters": "zeroshot", "time_limit": 3600, "refit_full": True, "set_best_to_refit_full": True, - "_save_bag_folds": False, + "save_bag_folds": False, }, # 
Good predictive accuracy with very fast inference. ~4x faster training, ~8x faster inference and ~8x lower disk usage than `high_quality`. - # Recommended for applications that require veru fast inference speed. + # Recommended for applications that require very fast inference speed. # Aliases: good good_quality={ "auto_stack": True, - "dynamic_stacking": True, + "dynamic_stacking": "auto", "num_bag_sets": 1, "hyperparameters": "light", "time_limit": 3600, "refit_full": True, "set_best_to_refit_full": True, - "_save_bag_folds": False, + "save_bag_folds": False, }, # Medium predictive accuracy with very fast inference and very fast training time. ~20x faster training than `good_quality`. # This is the default preset in AutoGluon, but should generally only be used for quick prototyping, as `good_quality` results in significantly better predictive accuracy with similar inference time. @@ -67,10 +67,10 @@ best_quality_v082={"auto_stack": True}, # High predictive accuracy with fast inference. ~10x-200x faster inference and ~10x-200x lower disk usage than `best_quality`. # Recommended for applications that require reasonable inference speed and/or model size. - high_quality_v082={"auto_stack": True, "refit_full": True, "set_best_to_refit_full": True, "_save_bag_folds": False}, + high_quality_v082={"auto_stack": True, "refit_full": True, "set_best_to_refit_full": True, "save_bag_folds": False}, # Good predictive accuracy with very fast inference. ~4x faster inference and ~4x lower disk usage than `high_quality`. # Recommended for applications that require fast inference speed. - good_quality_v082={"auto_stack": True, "refit_full": True, "set_best_to_refit_full": True, "_save_bag_folds": False, "hyperparameters": "light"}, + good_quality_v082={"auto_stack": True, "refit_full": True, "set_best_to_refit_full": True, "save_bag_folds": False, "hyperparameters": "light"}, # ------------------------------------------ # Experimental presets. Only use these presets if you are ok with unstable and potentially poor performing presets. # Experimental presets can be removed or changed without warning. 
diff --git a/tabular/src/autogluon/tabular/learner/abstract_learner.py b/tabular/src/autogluon/tabular/learner/abstract_learner.py --- a/tabular/src/autogluon/tabular/learner/abstract_learner.py +++ b/tabular/src/autogluon/tabular/learner/abstract_learner.py @@ -600,8 +600,8 @@ def score_debug( data={ "model": model_names_final, "score_test": list(scores.values()), - "pred_time_test": [pred_time_test[model] for model in model_names_final], - "pred_time_test_marginal": [pred_time_test_marginal[model] for model in model_names_final], + "pred_time_test": [pred_time_test.get(model, np.nan) for model in model_names_final], + "pred_time_test_marginal": [pred_time_test_marginal.get(model, np.nan) for model in model_names_final], } ) if df_extra_scores is not None: @@ -879,8 +879,12 @@ def leaderboard( inplace=True, ) if "metric_error_test" in leaderboard: - leaderboard["metric_error_test"] = leaderboard["metric_error_test"].apply(self.eval_metric.convert_score_to_error) - leaderboard["metric_error_val"] = leaderboard["metric_error_val"].apply(self.eval_metric.convert_score_to_error) + leaderboard.loc[leaderboard["metric_error_test"].notnull(), "metric_error_test"] = leaderboard.loc[ + leaderboard["metric_error_test"].notnull(), "metric_error_test" + ].apply(self.eval_metric.convert_score_to_error) + leaderboard.loc[leaderboard["metric_error_val"].notnull(), "metric_error_val"] = leaderboard.loc[ + leaderboard["metric_error_val"].notnull(), "metric_error_val" + ].apply(self.eval_metric.convert_score_to_error) if display: with pd.option_context("display.max_rows", None, "display.max_columns", None, "display.width", 1000): print(leaderboard) @@ -1040,6 +1044,7 @@ def calibrate_decision_threshold( model: str = "best", decision_thresholds: int | List[float] = 50, verbose: bool = True, + **kwargs, ) -> float: # TODO: docstring if metric is None: @@ -1056,7 +1061,14 @@ def calibrate_decision_threshold( y = self.transform_labels(y=data[self.label]) return self.load_trainer().calibrate_decision_threshold( - X=X, y=y, metric=metric, model=model, weights=weights, decision_thresholds=decision_thresholds, verbose=verbose + X=X, + y=y, + metric=metric, + model=model, + weights=weights, + decision_thresholds=decision_thresholds, + verbose=verbose, + **kwargs, ) # TODO: Add data info gathering at beginning of .fit() that is used by all learners to add to get_info output diff --git a/tabular/src/autogluon/tabular/predictor/predictor.py b/tabular/src/autogluon/tabular/predictor/predictor.py --- a/tabular/src/autogluon/tabular/predictor/predictor.py +++ b/tabular/src/autogluon/tabular/predictor/predictor.py @@ -403,7 +403,7 @@ def fit( fit_weighted_ensemble: bool = True, fit_full_last_level_weighted_ensemble: bool = True, full_weighted_ensemble_additionally: bool = False, - dynamic_stacking: bool = False, + dynamic_stacking: bool | str = False, calibrate_decision_threshold: bool = False, num_cpus="auto", num_gpus="auto", @@ -446,11 +446,11 @@ def fit( Best predictive accuracy with little consideration to inference time or disk usage. Achieve even better results by specifying a large time_limit value. Recommended for applications that benefit from the best possible model accuracy. - high_quality={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False} + high_quality={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, 'save_bag_folds': False} High predictive accuracy with fast inference. 
~10x-200x faster inference and ~10x-200x lower disk usage than `best_quality`. Recommended for applications that require reasonable inference speed and/or model size. - good_quality={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False, 'hyperparameters': 'light'} + good_quality={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, 'save_bag_folds': False, 'hyperparameters': 'light'} Good predictive accuracy with very fast inference. ~4x faster inference and ~4x lower disk usage than `high_quality`. Recommended for applications that require fast inference speed. @@ -660,14 +660,16 @@ def fit( If True, AutoGluon will fit two WeightedEnsembleModels after training all stacking levels. Setting this to True, simulates calling `fit_weighted_ensemble()` after calling `fit()`. Has no affect if `fit_full_last_level_weighted_ensemble` is False and does not fit an additional WeightedEnsembleModel if stacking is disabled. - dynamic_stacking: bool, default = False + dynamic_stacking: bool | str, default = False If True and `num_stack_levels` > 0, AutoGluon will dynamically determine whether to use stacking or not by first validating AutoGluon's stacking behavior. This is done to avoid so-called stacked overfitting that can make traditional multi-layer stacking, as used in AutoGluon, fail drastically and produce unreliable validation scores. - It is recommended to keep this value set to True when using stacking, as long as it is unknown whether the data is affected by stacked overfitting. + It is recommended to keep this value set to True or "auto" when using stacking, + as long as it is unknown whether the data is affected by stacked overfitting. If it is known that the data is unaffected by stacked overfitting, then setting this value to False is expected to maximize predictive quality. If enabled, by default, AutoGluon performs dynamic stacking by spending 25% of the provided time limit for detection and all remaining time for fitting AutoGluon. This can be adjusted by specifying `ds_args` with different parameters to `fit()`. + If "auto", will be set to `not use_bag_holdout`. See the documentation of `ds_args` for more information. calibrate_decision_threshold : bool, default = False [Experimental] This may be removed / changed without warning in a future release. @@ -710,12 +712,13 @@ def fit( Default value (if None) is selected based on the number of rows in the training data. Default values range from 0.2 at 2,500 rows to 0.01 at 250,000 rows. Default value is doubled if `hyperparameter_tune_kwargs` is set, up to a maximum of 0.2. Disabled if `num_bag_folds >= 2` unless `use_bag_holdout == True`. - use_bag_holdout : bool, default = False + use_bag_holdout : bool | str, default = False If True, a `holdout_frac` portion of the data is held-out from model bagging. This held-out data is only used to score models and determine weighted ensemble weights. Enable this if there is a large gap between score_val and score_test in stack models. Note: If `tuning_data` was specified, `tuning_data` is used as the holdout data. Disabled if not bagging. + If "auto", will be set to True if the training data has >= 1000000 rows, else it will be set to False. hyperparameter_tune_kwargs : str or dict, default = None Hyperparameter tuning strategy and kwargs (for example, how many HPO trials to run). If None, then hyperparameter tuning will not be performed. 
@@ -836,6 +839,16 @@ def fit( If the user does not have additional test data, they should reference the original model's score for an estimate of the performance of the refit_full model. Warning: Be aware that utilizing refit_full models without separately verifying on test data means that the model is untested, and has no guarantee of being consistent with the original model. The time taken by this process is not enforced by `time_limit`. + save_bag_folds : bool, default = True + If True, will save the bagged fold models to disk. + If False, will not save the bagged fold models, only keeping their metadata and out-of-fold predictions. + Note: The bagged models will not be available for prediction, only use this if you intend to call `refit_full`. + The purpose of setting it to False is that it greatly decreases the peak disk usage of the predictor during the fit call when bagging. + Note that this makes refit_full slightly more likely to crash in scenarios where the dataset is large relative to available system memory. + This is because by default, refit_full will fall back to cloning the first fold of the bagged model in case it lacks memory to refit. + However, if `save_bag_folds=False`, this fallback isn't possible, as there is not fold model to clone because it wasn't saved. + In this scenario, refit will raise an exception for `save_bag_folds=False`, but will succeed if `save_bag_folds=True`. + Final disk usage of predictor will be identical regardless of the setting after `predictor.delete_models(models_to_keep="best", dry_run=False)` is called post-fit. set_best_to_refit_full : bool, default = False If True, will change the default model that Predictor uses for prediction when model is not specified to the refit_full version of the model that exhibited the highest validation score. Only valid if `refit_full` is set. @@ -986,8 +999,6 @@ def fit( if isinstance(hyperparameters, str): hyperparameters = get_hyperparameter_config(hyperparameters) - # TODO: Hyperparam could have non-serializble objects. Save as pkl and loaded on demand - # in case the hyperprams are large in memory self.fit_hyperparameters_ = hyperparameters if "enable_raw_text_features" not in feature_generator_init_kwargs: @@ -1003,7 +1014,7 @@ def fit( else: inferred_problem_type = self._learner.infer_problem_type(y=train_data[self.label], silent=True) - num_bag_folds, num_bag_sets, num_stack_levels, dynamic_stacking = self._sanitize_stack_args( + num_bag_folds, num_bag_sets, num_stack_levels, dynamic_stacking, use_bag_holdout = self._sanitize_stack_args( num_bag_folds=num_bag_folds, num_bag_sets=num_bag_sets, num_stack_levels=num_stack_levels, @@ -1012,6 +1023,7 @@ def fit( num_train_rows=len(train_data), problem_type=inferred_problem_type, dynamic_stacking=dynamic_stacking, + use_bag_holdout=use_bag_holdout, ) if auto_stack: logger.log( @@ -1023,16 +1035,32 @@ def fit( if holdout_frac is None: holdout_frac = default_holdout_frac(len(train_data), ag_args.get("hyperparameter_tune_kwargs", None) is not None) - if kwargs["_save_bag_folds"] is not None: - if use_bag_holdout and not kwargs["_save_bag_folds"]: + if kwargs["save_bag_folds"] is not None and kwargs["_save_bag_folds"] is not None: + raise ValueError( + f"Cannot specify both `save_bag_folds` and `_save_bag_folds` at the same time. 
" + f"(save_bag_folds={kwargs['save_bag_folds']}, _save_bag_folds={kwargs['_save_bag_folds']}" + ) + elif kwargs["_save_bag_folds"] is not None: + kwargs["save_bag_folds"] = kwargs["_save_bag_folds"] + + if kwargs["save_bag_folds"] is not None: + assert isinstance(kwargs["save_bag_folds"], bool), f"save_bag_folds must be a bool, found: {type(kwargs['save_bag_folds'])}" + if use_bag_holdout and not kwargs["save_bag_folds"]: logger.log( 30, f"WARNING: Attempted to disable saving of bagged fold models when `use_bag_holdout=True`. Forcing `save_bag_folds=True` to avoid errors.", ) else: + if num_bag_folds > 0 and not kwargs["save_bag_folds"]: + logger.log( + 20, + f"Note: `save_bag_folds=False`! This will greatly reduce peak disk usage during fit (by ~{num_bag_folds}x), " + f"but runs the risk of an out-of-memory error during model refit if memory is small relative to the data size.\n" + f"\tYou can avoid this risk by setting `save_bag_folds=True`.", + ) if ag_args_ensemble is None: ag_args_ensemble = {} - ag_args_ensemble["save_bag_folds"] = kwargs["_save_bag_folds"] + ag_args_ensemble["save_bag_folds"] = kwargs["save_bag_folds"] if time_limit is None: mb_mem_usage_train_data = get_approximate_df_mem_usage(train_data, sample_ratio=0.2).sum() / 1e6 @@ -3346,6 +3374,7 @@ def calibrate_decision_threshold( metric: str | Scorer | None = None, model: str = "best", decision_thresholds: int | List[float] = 50, + subsample_size: int | None = 1000000, verbose: bool = True, ) -> float: """ @@ -3374,6 +3403,9 @@ def calibrate_decision_threshold( The number of decision thresholds on either side of `0.5` to search. The default of 50 will result in 101 searched thresholds: [0.00, 0.01, 0.02, ..., 0.49, 0.50, 0.51, ..., 0.98, 0.99, 1.00] Alternatively, a list of decision thresholds can be passed and only the thresholds in the list will be searched. + subsample_size : int | None, default = 1000000 + When `subsample_size` is not None and `data` contains more rows than `subsample_size`, samples to `subsample_size` rows to speed up calibration. + Usually it is not necessary to use more than 1 million rows for calibration. verbose : bool, default = True If True, will log information about the calibration process. @@ -3382,12 +3414,9 @@ def calibrate_decision_threshold( Decision Threshold: A float between 0 and 1 defining the decision boundary for predictions that maximizes the `metric` score on the `data` for the `model`. """ - # TODO: v0.8 - # add tutorial section - # - # TODO: v0.9 + # TODO: v1.2 # Calculate optimal threshold for each model separately when deciding best model - # sampling/time limit + # time limit # update validation scores of models based on threshold # speed up the logic / search for optimal threshold more efficiently # make threshold calibration part of internal optimization, such as during fit_weighted_ensemble. 
@@ -3406,7 +3435,14 @@ def calibrate_decision_threshold( if model == "best": model = self.model_best - return self._learner.calibrate_decision_threshold(data=data, metric=metric, model=model, decision_thresholds=decision_thresholds, verbose=verbose) + return self._learner.calibrate_decision_threshold( + data=data, + metric=metric, + model=model, + decision_thresholds=decision_thresholds, + subsample_size=subsample_size, + verbose=verbose, + ) def predict_oof(self, model: str = None, *, transformed=False, train_data=None, internal_oof=False, decision_threshold=None, can_infer=None) -> pd.Series: """ @@ -4354,6 +4390,7 @@ def _fit_extra_kwargs_dict(self): keep_only_best=False, save_space=False, refit_full=False, + save_bag_folds=None, # other verbosity=self.verbosity, feature_prune_kwargs=None, @@ -4562,27 +4599,42 @@ def _set_feature_generator(self, feature_generator="auto", feature_metadata=None feature_generator=feature_generator, feature_metadata=feature_metadata, init_kwargs=init_kwargs ) - def _sanitize_stack_args(self, num_bag_folds, num_bag_sets, num_stack_levels, time_limit, auto_stack, num_train_rows, problem_type, dynamic_stacking): + def _sanitize_stack_args( + self, + num_bag_folds: int, + num_bag_sets: int, + num_stack_levels: int, + time_limit: float | None, + auto_stack: bool, + num_train_rows: int, + problem_type: str, + dynamic_stacking: bool | str, + use_bag_holdout: bool | str, + ): + use_bag_holdout_auto_threshold = 1000000 + use_bag_holdout_was_auto = False + dynamic_stacking_was_auto = False + if isinstance(use_bag_holdout, str) and use_bag_holdout == "auto": + # Leverage use_bag_holdout when data is large to safeguard against stack leakage + use_bag_holdout = num_train_rows >= use_bag_holdout_auto_threshold + use_bag_holdout_was_auto = True + if isinstance(dynamic_stacking, str) and dynamic_stacking == "auto": + dynamic_stacking = not use_bag_holdout + dynamic_stacking_was_auto = True if auto_stack: # TODO: What about datasets that are 100k+? At a certain point should we not bag? # TODO: What about time_limit? Metalearning can tell us expected runtime of each model, then we can select optimal folds + stack levels to fit time constraint if num_bag_folds is None: num_bag_folds = min(8, max(5, math.floor(num_train_rows / 10))) - # TODO: Leverage use_bag_holdout when data is large to enable multi-layer stacking - # if num_train_rows >= 100000 and num_val_rows is None and use_bag_holdout is None: - # use_bag_holdout = True if num_stack_levels is None: if dynamic_stacking: num_stack_levels = 1 else: - if problem_type == BINARY: + if use_bag_holdout or problem_type != BINARY: + num_stack_levels = min(1, max(0, math.floor(num_train_rows / 750))) + else: # Disable multi-layer stacking to avoid stack info leakage num_stack_levels = 0 - # TODO: - # if use_bag_holdout: - # num_stack_levels = min(1, max(0, math.floor(num_train_rows / 750))) - else: - num_stack_levels = min(1, max(0, math.floor(num_train_rows / 750))) if num_bag_folds is None: num_bag_folds = 0 if num_stack_levels is None: @@ -4605,11 +4657,31 @@ def _sanitize_stack_args(self, num_bag_folds, num_bag_sets, num_stack_levels, ti num_bag_sets = 1 if not isinstance(num_bag_sets, int): raise ValueError(f"num_bag_sets must be an integer. (num_bag_sets={num_bag_sets})") - + if not isinstance(dynamic_stacking, bool): + raise ValueError(f"dynamic_stacking must be a bool. (dynamic_stacking={dynamic_stacking})") + if not isinstance(use_bag_holdout, bool): + raise ValueError(f"use_bag_holdout must be a bool. 
(use_bag_holdout={use_bag_holdout})") + + if use_bag_holdout_was_auto and num_bag_folds != 0: + if use_bag_holdout: + log_extra = f"Reason: num_train_rows >= {use_bag_holdout_auto_threshold}. (num_train_rows={num_train_rows})" + else: + log_extra = f"Reason: num_train_rows < {use_bag_holdout_auto_threshold}. (num_train_rows={num_train_rows})" + logger.log(20, f"Setting use_bag_holdout from 'auto' to {use_bag_holdout}. {log_extra}") + log_extra_ds = None if dynamic_stacking and num_stack_levels < 1: - logger.log(20, "Dynamic stacking was enabled but stacking is disabled. Setting dynamic stacking to False.") + log_extra_ds = f"Reason: Stacking is not enabled. (num_stack_levels={num_stack_levels})" + if not dynamic_stacking_was_auto: + logger.log(20, f"Forcing dynamic_stacking to False. {log_extra_ds}") dynamic_stacking = False - return num_bag_folds, num_bag_sets, num_stack_levels, dynamic_stacking + elif dynamic_stacking_was_auto: + if dynamic_stacking: + log_extra_ds = f"Reason: Enable dynamic_stacking when use_bag_holdout is disabled. (use_bag_holdout={use_bag_holdout})" + else: + log_extra_ds = f"Reason: Skip dynamic_stacking when use_bag_holdout is enabled. (use_bag_holdout={use_bag_holdout})" + logger.log(20, f"Setting dynamic_stacking from 'auto' to {dynamic_stacking}. {log_extra_ds}") + + return num_bag_folds, num_bag_sets, num_stack_levels, dynamic_stacking, use_bag_holdout # TODO: Add .delete() method to easily clean-up clones? # Would need to be careful that user doesn't delete important things accidentally.
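To make the speed-up in the patch above concrete, here is a standalone sketch of the subsampling trick it applies in both `calibrate_decision_threshold` and `EnsembleSelection._fit` (illustrative only; `subsample_for_speed` is a hypothetical helper, not AutoGluon code):

```python
import numpy as np


def subsample_for_speed(y, y_pred_proba, subsample_size, seed=0):
    """Randomly subsample aligned label/prediction arrays with a fixed seed,
    mirroring the rng.choice(..., replace=False) pattern used in the patch."""
    num_samples_total = len(y)
    if subsample_size is None or subsample_size >= num_samples_total:
        return y, y_pred_proba
    rng = np.random.default_rng(seed=seed)
    subsample_indices = rng.choice(num_samples_total, subsample_size, replace=False)
    return y[subsample_indices], y_pred_proba[subsample_indices]


y = np.random.randint(0, 2, size=10_000)
proba = np.random.rand(10_000)
y_small, proba_small = subsample_for_speed(y, proba, subsample_size=1_000)
assert len(y_small) == len(proba_small) == 1_000
```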
diff --git a/tabular/tests/conftest.py b/tabular/tests/conftest.py --- a/tabular/tests/conftest.py +++ b/tabular/tests/conftest.py @@ -146,7 +146,7 @@ def fit_and_validate_dataset( expected_stacked_overfitting_at_test=None, expected_stacked_overfitting_at_val=None, scikit_api=False, - ): + ) -> TabularPredictor: if compiler_configs is None: compiler_configs = {} directory_prefix = "./datasets/" @@ -171,7 +171,9 @@ def fit_and_validate_dataset( init_args["path"] = PathConverter.to_absolute(path=init_args["path"]) assert PathConverter._is_absolute(path=init_args["path"]) save_path = init_args["path"] - predictor = FitHelper.fit_dataset(train_data=train_data, init_args=init_args, fit_args=fit_args, sample_size=sample_size, scikit_api=scikit_api) + predictor: TabularPredictor = FitHelper.fit_dataset( + train_data=train_data, init_args=init_args, fit_args=fit_args, sample_size=sample_size, scikit_api=scikit_api + ) if compile: predictor.compile(models="all", compiler_configs=compiler_configs) predictor.persist(models="all") diff --git a/tabular/tests/unittests/models/test_lightgbm.py b/tabular/tests/unittests/models/test_lightgbm.py --- a/tabular/tests/unittests/models/test_lightgbm.py +++ b/tabular/tests/unittests/models/test_lightgbm.py @@ -1,5 +1,6 @@ from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION from autogluon.core.metrics import METRICS +from autogluon.tabular import TabularPredictor from autogluon.tabular.models.lgb.lgb_model import LGBModel @@ -70,7 +71,7 @@ def test_lightgbm_binary_with_calibrate_decision_threshold(fit_helper): ) dataset_name = "adult" - predictor = fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args, delete_directory=False, refit_full=False) + predictor: TabularPredictor = fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args, delete_directory=False, refit_full=False) for metric in [None, "f1", "balanced_accuracy", "mcc", "recall", "precision"]: decision_threshold = predictor.calibrate_decision_threshold(metric=metric) @@ -119,7 +120,9 @@ def test_lightgbm_binary_with_calibrate_decision_threshold_bagged_refit(fit_help directory_prefix = "./datasets/" train_data, test_data, dataset_info = dataset_loader_helper.load_dataset(name=dataset_name, directory_prefix=directory_prefix) label = dataset_info["label"] - predictor = fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, init_args=init_args, fit_args=fit_args, delete_directory=False, refit_full=True) + predictor: TabularPredictor = fit_helper.fit_and_validate_dataset( + dataset_name=dataset_name, init_args=init_args, fit_args=fit_args, delete_directory=False, refit_full=True + ) assert predictor._decision_threshold is not None assert predictor.decision_threshold == predictor._decision_threshold
Tabular: Fallback to bag fold model for refit if out-of-memory / not-enough-memory error occurs when fitting `_FULL` model.

This will safeguard against low-memory scenarios where there isn't sufficient memory to train the `_FULL` model, but refit_full is specified. Currently this will lead to a later exception. Instead, we can fall back to the first fold model trained in the bag to ensure it doesn't crash.

Related: This issue is a band-aid solution to #3935, which is the overarching memory error problem.
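A rough sketch of the fallback flow this change introduces. Note that the real patch detects failure via an empty `models_trained` list rather than by catching `MemoryError`, and `trainer.refit` is a hypothetical stand-in; `convert_to_refit_full_via_copy` and the `save_bag_folds` check are taken from the diff above:

```python
import logging

logger = logging.getLogger(__name__)


def refit_full_with_fallback(bagged_model, trainer):
    """Try to refit on all data; on failure, clone the first bagged fold instead."""
    try:
        return trainer.refit(bagged_model)  # hypothetical call that may fail on OOM
    except MemoryError:
        logger.warning(
            f"Refit training failure detected for '{bagged_model.name}'. "
            "Falling back to copying the first fold to avoid a downstream exception."
        )
        if not bagged_model.params.get("save_bag_folds", True):
            # No fold model was saved, so there is nothing to clone.
            raise AssertionError(
                "Cannot fall back to the first fold because it was not saved "
                "(save_bag_folds=False). Specify save_bag_folds=True to avoid this."
            )
        return bagged_model.convert_to_refit_full_via_copy()
```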
2024-03-12T23:44:44Z
[]
[]
autogluon/autogluon
3995
autogluon__autogluon-3995
[ "3886" ]
7890b93a9464ae813837e98f3efcd5e3b6982a80
diff --git a/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py b/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py --- a/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py +++ b/timeseries/src/autogluon/timeseries/dataset/ts_dataframe.py @@ -765,11 +765,19 @@ def fill_missing_values(self, method: str = "auto", value: float = 0.0) -> TimeS "(for example, using the `convert_frequency` method)." ) - grouped_df = pd.DataFrame(self).groupby(level=ITEMID, sort=False, group_keys=False) + # Convert to pd.DataFrame for faster processing + df = pd.DataFrame(self) + + # Skip filling if there are no NaNs + if not df.isna().any(axis=None): + return self + + grouped_df = df.groupby(level=ITEMID, sort=False, group_keys=False) if method == "auto": filled_df = grouped_df.ffill() - # Fill missing values at the start of each time series with bfill - filled_df = filled_df.groupby(level=ITEMID, sort=False, group_keys=False).bfill() + # If necessary, fill missing values at the start of each time series with bfill + if filled_df.isna().any(axis=None): + filled_df = filled_df.groupby(level=ITEMID, sort=False, group_keys=False).bfill() elif method in ["ffill", "pad"]: filled_df = grouped_df.ffill() elif method in ["bfill", "backfill"]: diff --git a/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py b/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py --- a/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +++ b/timeseries/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py @@ -201,7 +201,9 @@ def get_info(self) -> dict: } return info - def fit(self, **kwargs) -> "AbstractTimeSeriesModel": + def fit( + self, train_data: TimeSeriesDataFrame, val_data: Optional[TimeSeriesDataFrame] = None, **kwargs + ) -> "AbstractTimeSeriesModel": """Fit timeseries model. Models should not override the `fit` method, but instead override the `_fit` method which @@ -235,7 +237,10 @@ def fit(self, **kwargs) -> "AbstractTimeSeriesModel": model: AbstractTimeSeriesModel The fitted model object """ - return super().fit(**kwargs) + train_data = self.preprocess(train_data, is_train=True) + if self._get_tags()["can_use_val_data"] and val_data is not None: + val_data = self.preprocess(val_data, is_train=False) + return super().fit(train_data=train_data, val_data=val_data, **kwargs) def _fit( self, @@ -290,6 +295,7 @@ def predict( data is given as a separate forecast item in the dictionary, keyed by the `item_id`s of input items. """ + data = self.preprocess(data, is_train=False) predictions = self._predict(data=data, known_covariates=known_covariates, **kwargs) logger.debug(f"Predicting with model {self.name}") # "0.5" might be missing from the quantiles if self is a wrapper (MultiWindowBacktestingModel or ensemble) @@ -488,7 +494,7 @@ def _hyperparameter_tune( return hpo_models, analysis - def preprocess(self, data: Any, **kwargs) -> Any: + def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any: return data def get_memory_size(self, **kwargs) -> Optional[int]: @@ -506,3 +512,20 @@ def get_user_params(self) -> dict: return {} else: return self._user_params.copy() + + def _more_tags(self) -> dict: + """Encode model properties using tags, similar to sklearn & autogluon.tabular. + + For more details, see `autogluon.core.models.abstract.AbstractModel._get_tags()` and https://scikit-learn.org/stable/_sources/developers/develop.rst.txt. 
+ + List of currently supported tags: + - allow_nan: Can the model handle data with missing values represented by np.nan? + - can_refit_full: Does it make sense to retrain the model without validation data? + See `autogluon.core.models.abstract._tags._DEFAULT_TAGS` for more details. + - can_use_val_data: Can model use val_data if it's provided to model.fit()? + """ + return { + "allow_nan": False, + "can_refit_full": False, + "can_use_val_data": False, + } diff --git a/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py b/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py --- a/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +++ b/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py @@ -85,6 +85,21 @@ def __init__( self._scaler: Optional[BaseTargetTransform] = None self._residuals_std_per_item: Optional[pd.Series] = None self._avg_residuals_std: Optional[float] = None + self._train_target_median: Optional[float] = None + + def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any: + if is_train: + # All-NaN series are removed; partially-NaN series in train_data are handled inside _generate_train_val_dfs + all_nan_items = data.item_ids[data[self.target].isna().groupby(ITEMID, sort=False).all()] + if len(all_nan_items): + data = data.query("item_id not in @all_nan_items") + return data + else: + data = data.fill_missing_values() + # Fill time series consisting of all NaNs with the median of target in train_data + if data.isna().any(axis=None): + data[self.target] = data[self.target].fillna(value=self._train_target_median) + return data def _get_extra_tabular_init_kwargs(self) -> dict: raise NotImplementedError @@ -98,8 +113,6 @@ def _get_model_params(self) -> dict: return model_params def _get_mlforecast_init_args(self, train_data: TimeSeriesDataFrame, model_params: dict) -> dict: - # TODO: Support lag generation for all pandas frequencies - # TODO: Support date_feature generation for all pandas frequencies from mlforecast.target_transforms import Differences from .utils import MeanAbsScaler, StandardScaler @@ -181,6 +194,10 @@ def _generate_train_val_dfs( items_to_keep = data.item_ids.to_series().sample(n=int(max_num_items)) # noqa: F841 data = data.query("item_id in @items_to_keep") + # MLForecast.preprocess does not support missing values, but we will exclude them later from the training set + missing_entries = data.index[data[self.target].isna()] + data = data.fill_missing_values() + num_items = data.num_items mlforecast_df = self._to_mlforecast_df(data, data.static_features) @@ -197,6 +214,10 @@ def _generate_train_val_dfs( df = self._mask_df(df) + # We remove originally missing values filled via imputation from the training set + if len(missing_entries): + df = df.set_index(["unique_id", "ds"]).drop(missing_entries, errors="ignore").reset_index() + if max_num_samples is not None and len(df) > max_num_samples: df = df.sample(n=max_num_samples) @@ -246,6 +267,7 @@ def _fit( self._check_fit_params() fit_start_time = time.time() + self._train_target_median = train_data[self.target].median() # TabularEstimator is passed to MLForecast later to include tuning_data model_params = self._get_model_params() @@ -355,7 +377,7 @@ def _add_gaussian_quantiles(self, predictions: pd.DataFrame, repeated_item_ids: return predictions def _more_tags(self) -> dict: - return {"can_refit_full": True} + return {"allow_nan": True, "can_refit_full": True} class 
DirectTabularModel(AbstractMLForecastModel): diff --git a/timeseries/src/autogluon/timeseries/models/chronos/model.py b/timeseries/src/autogluon/timeseries/models/chronos/model.py --- a/timeseries/src/autogluon/timeseries/models/chronos/model.py +++ b/timeseries/src/autogluon/timeseries/models/chronos/model.py @@ -363,3 +363,6 @@ def _predict( ) return TimeSeriesDataFrame(df) + + def _more_tags(self) -> Dict: + return {"allow_nan": True} diff --git a/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py b/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py --- a/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py +++ b/timeseries/src/autogluon/timeseries/models/gluonts/abstract_gluonts.py @@ -328,8 +328,6 @@ def _to_gluonts_dataset( if self.num_feat_static_real > 0: feat_static_real = time_series_df.static_features[self.metadata.static_features_real] - if feat_static_real.isna().values.any(): - feat_static_real = feat_static_real.fillna(feat_static_real.mean()) else: feat_static_real = None @@ -548,3 +546,6 @@ def _gluonts_forecasts_to_data_frame( forecast_df.index = forecast_index return TimeSeriesDataFrame(forecast_df) + + def _more_tags(self) -> dict: + return {"allow_nan": True, "can_use_val_data": True} diff --git a/timeseries/src/autogluon/timeseries/models/local/abstract_local_model.py b/timeseries/src/autogluon/timeseries/models/local/abstract_local_model.py --- a/timeseries/src/autogluon/timeseries/models/local/abstract_local_model.py +++ b/timeseries/src/autogluon/timeseries/models/local/abstract_local_model.py @@ -1,7 +1,7 @@ import logging import time from multiprocessing import TimeoutError, cpu_count -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd @@ -85,6 +85,12 @@ def __init__( self._local_model_args: Dict[str, Any] = None self._seasonal_period: Optional[int] = None self.time_limit: Optional[float] = None + self._dummy_forecast: Optional[pd.DataFrame] = None + + def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any: + if not self._get_tags()["allow_nan"]: + data = data.fill_missing_values() + return data def _fit(self, train_data: TimeSeriesDataFrame, time_limit: Optional[int] = None, **kwargs): self._check_fit_params() @@ -115,8 +121,16 @@ def _fit(self, train_data: TimeSeriesDataFrame, time_limit: Optional[int] = None self._local_model_args = self._update_local_model_args(local_model_args=local_model_args) self.time_limit = time_limit + + self._dummy_forecast = self._get_dummy_forecast(train_data) return self + def _get_dummy_forecast(self, train_data: TimeSeriesDataFrame) -> pd.DataFrame: + agg_functions = ["mean"] + [get_quantile_function(q) for q in self.quantile_levels] + stats_marginal = train_data[self.target].agg(agg_functions) + stats_repeated = np.tile(stats_marginal.values, [self.prediction_length, 1]) + return pd.DataFrame(stats_repeated, columns=stats_marginal.index) + def _update_local_model_args(self, local_model_args: Dict[str, Any]) -> Dict[str, Any]: return local_model_args @@ -164,25 +178,30 @@ def score_and_cache_oof( def _predict_wrapper(self, time_series: pd.Series, end_time: Optional[float] = None) -> Tuple[pd.DataFrame, bool]: if end_time is not None and time.time() >= end_time: raise TimeLimitExceeded - try: - result = self._predict_with_local_model( - time_series=time_series, - local_model_args=self._local_model_args.copy(), - ) - if not 
np.isfinite(result.values).all(): - raise RuntimeError("Forecast contains NaN or Inf values.") - model_failed = False - except Exception: - if self.use_fallback_model: - result = seasonal_naive_forecast( - target=time_series.values.ravel(), - prediction_length=self.prediction_length, - quantile_levels=self.quantile_levels, - seasonal_period=self._seasonal_period, + + if time_series.isna().all(): + result = self._dummy_forecast.copy() + model_failed = True + else: + try: + result = self._predict_with_local_model( + time_series=time_series, + local_model_args=self._local_model_args.copy(), ) - model_failed = True - else: - raise + if not np.isfinite(result.values).all(): + raise RuntimeError("Forecast contains NaN or Inf values.") + model_failed = False + except Exception: + if self.use_fallback_model: + result = seasonal_naive_forecast( + target=time_series.values.ravel(), + prediction_length=self.prediction_length, + quantile_levels=self.quantile_levels, + seasonal_period=self._seasonal_period, + ) + model_failed = True + else: + raise return result, model_failed def _predict_with_local_model( @@ -197,25 +216,51 @@ def seasonal_naive_forecast( target: np.ndarray, prediction_length: int, quantile_levels: List[float], seasonal_period: int ) -> pd.DataFrame: """Generate seasonal naive forecast, predicting the last observed value from the same period.""" + + def numpy_ffill(arr: np.ndarray) -> np.ndarray: + """Fast implementation of forward fill in numpy.""" + idx = np.arange(len(arr)) + mask = np.isnan(arr) + idx[mask] = 0 + return arr[np.maximum.accumulate(idx)] + forecast = {} + # Convert to float64 since std computation can be unstable in float32 + target = target.astype(np.float64) # At least seasonal_period + 2 values are required to compute sigma for seasonal naive if len(target) > seasonal_period + 1 and seasonal_period > 1: + if np.isnan(target[-(seasonal_period + 2) :]).any(): + target = numpy_ffill(target) + indices = [len(target) - seasonal_period + k % seasonal_period for k in range(prediction_length)] forecast["mean"] = target[indices] residuals = target[seasonal_period:] - target[:-seasonal_period] - sigma = np.sqrt(np.mean(np.square(residuals))) + sigma = np.sqrt(np.nanmean(np.square(residuals))) num_full_seasons = np.arange(1, prediction_length + 1) // seasonal_period sigma_per_timestep = sigma * np.sqrt(num_full_seasons + 1) else: # Fall back to naive forecast - forecast["mean"] = np.full(shape=[prediction_length], fill_value=target[-1]) + last_observed_value = target[np.isfinite(target)][-1] + forecast["mean"] = np.full(shape=[prediction_length], fill_value=last_observed_value) residuals = target[1:] - target[:-1] - sigma = np.sqrt(np.mean(np.square(residuals))) + sigma = np.sqrt(np.nanmean(np.square(residuals))) + if np.isnan(sigma): # happens if there are no two consecutive non-nan observations + sigma = 0.0 sigma_per_timestep = sigma * np.sqrt(np.arange(1, prediction_length + 1)) for q in quantile_levels: forecast[str(q)] = forecast["mean"] + norm.ppf(q) * sigma_per_timestep return pd.DataFrame(forecast) + + +def get_quantile_function(q: float) -> Callable: + """Returns a function with name "q" that computes the q'th quantile of a pandas.Series.""" + + def quantile_fn(x: pd.Series) -> pd.Series: + return x.quantile(q) + + quantile_fn.__name__ = str(q) + return quantile_fn diff --git a/timeseries/src/autogluon/timeseries/models/local/naive.py b/timeseries/src/autogluon/timeseries/models/local/naive.py --- a/timeseries/src/autogluon/timeseries/models/local/naive.py +++ 
b/timeseries/src/autogluon/timeseries/models/local/naive.py @@ -1,9 +1,11 @@ -from typing import Callable - import numpy as np import pandas as pd -from autogluon.timeseries.models.local.abstract_local_model import AbstractLocalModel, seasonal_naive_forecast +from autogluon.timeseries.models.local.abstract_local_model import ( + AbstractLocalModel, + get_quantile_function, + seasonal_naive_forecast, +) class NaiveModel(AbstractLocalModel): @@ -36,6 +38,9 @@ def _predict_with_local_model( seasonal_period=1, ) + def _more_tags(self) -> dict: + return {"allow_nan": True} + class SeasonalNaiveModel(AbstractLocalModel): """Baseline model that sets the forecast equal to the last observed value from the same season. @@ -75,15 +80,8 @@ def _predict_with_local_model( seasonal_period=local_model_args["seasonal_period"], ) - -def _get_quantile_function(q: float) -> Callable: - """Returns a function with name "q" that computes the q'th quantile of a pandas.Series.""" - - def quantile_fn(x: pd.Series) -> pd.Series: - return x.quantile(q) - - quantile_fn.__name__ = str(q) - return quantile_fn + def _more_tags(self) -> dict: + return {"allow_nan": True} class AverageModel(AbstractLocalModel): @@ -109,11 +107,14 @@ def _predict_with_local_model( time_series: pd.Series, local_model_args: dict, ) -> pd.DataFrame: - agg_functions = ["mean"] + [_get_quantile_function(q) for q in self.quantile_levels] + agg_functions = ["mean"] + [get_quantile_function(q) for q in self.quantile_levels] stats_marginal = time_series.agg(agg_functions) stats_repeated = np.tile(stats_marginal.values, [self.prediction_length, 1]) return pd.DataFrame(stats_repeated, columns=stats_marginal.index) + def _more_tags(self) -> dict: + return {"allow_nan": True} + class SeasonalAverageModel(AbstractLocalModel): """Baseline model that sets the forecast equal to the historic average or quantile in the same season. @@ -146,7 +147,7 @@ def _predict_with_local_model( local_model_args: dict, ) -> pd.DataFrame: seasonal_period = local_model_args["seasonal_period"] - agg_functions = ["mean"] + [_get_quantile_function(q) for q in self.quantile_levels] + agg_functions = ["mean"] + [get_quantile_function(q) for q in self.quantile_levels] # Compute mean & quantiles for each season ts_df = time_series.reset_index(drop=True).to_frame() @@ -162,3 +163,6 @@ def _predict_with_local_model( stats_marginal = time_series.agg(agg_functions) result = result.fillna(stats_marginal) return result + + def _more_tags(self) -> dict: + return {"allow_nan": True} diff --git a/timeseries/src/autogluon/timeseries/models/local/npts.py b/timeseries/src/autogluon/timeseries/models/local/npts.py --- a/timeseries/src/autogluon/timeseries/models/local/npts.py +++ b/timeseries/src/autogluon/timeseries/models/local/npts.py @@ -88,3 +88,6 @@ def _predict_with_local_model( for q in self.quantile_levels: forecast_dict[str(q)] = forecast.quantile(q) return pd.DataFrame(forecast_dict) + + def _more_tags(self) -> dict: + return {"allow_nan": True} diff --git a/timeseries/src/autogluon/timeseries/models/local/statsforecast.py b/timeseries/src/autogluon/timeseries/models/local/statsforecast.py --- a/timeseries/src/autogluon/timeseries/models/local/statsforecast.py +++ b/timeseries/src/autogluon/timeseries/models/local/statsforecast.py @@ -204,6 +204,8 @@ class ARIMAModel(AbstractProbabilisticStatsForecastModel): This significantly speeds up fitting and usually leads to no change in accuracy. 
""" + # TODO: This model requires statsforecast >= 1.5.0, so it will only be available after we upgrade the dependency + allowed_local_model_args = [ "order", "seasonal_order", diff --git a/timeseries/src/autogluon/timeseries/models/multi_window/multi_window_model.py b/timeseries/src/autogluon/timeseries/models/multi_window/multi_window_model.py --- a/timeseries/src/autogluon/timeseries/models/multi_window/multi_window_model.py +++ b/timeseries/src/autogluon/timeseries/models/multi_window/multi_window_model.py @@ -243,4 +243,6 @@ def convert_to_refit_full_via_copy(self) -> AbstractTimeSeriesModel: return refit_model def _more_tags(self) -> dict: - return self.most_recent_model._get_tags() + tags = self.model_base._get_tags() + tags["can_use_val_data"] = False + return tags diff --git a/timeseries/src/autogluon/timeseries/predictor.py b/timeseries/src/autogluon/timeseries/predictor.py --- a/timeseries/src/autogluon/timeseries/predictor.py +++ b/timeseries/src/autogluon/timeseries/predictor.py @@ -276,7 +276,7 @@ def _check_and_prepare_data_frame( data: Union[TimeSeriesDataFrame, pd.DataFrame, Path, str], name: str = "data", ) -> TimeSeriesDataFrame: - """Ensure that TimeSeriesDataFrame has a sorted index, valid frequency, and contains no missing values. + """Ensure that TimeSeriesDataFrame has a sorted index and a valid frequency. If self.freq is None, then self.freq of the predictor will be set to the frequency of the data. @@ -314,18 +314,6 @@ def _check_and_prepare_data_frame( if df.freq != self.freq: logger.warning(f"{name} with frequency '{df.freq}' has been resampled to frequency '{self.freq}'.") df = df.convert_frequency(freq=self.freq) - - # Fill missing values - if df.isna().values.any(): - # FIXME: Do not automatically fill NaNs here, handle missing values at the level of individual models. - # FIXME: Current solution leads to incorrect metric computation if missing values are present - logger.warning( - f"{name} contains missing values represented by NaN. " - f"They have been filled by carrying forward the last valid observation." - ) - df = df.fill_missing_values() - if df.isna().values.any(): - raise ValueError(f"Some time series in {name} consist completely of NaN values. Please remove them.") return df def _check_data_for_evaluation(self, data: TimeSeriesDataFrame, name: str = "data"): @@ -337,15 +325,19 @@ def _check_data_for_evaluation(self, data: TimeSeriesDataFrame, name: str = "dat f"all time series have length > prediction_length (at least {self.prediction_length + 1})" ) - @staticmethod - def _get_dataset_stats(data: TimeSeriesDataFrame) -> str: + def _get_dataset_stats(self, data: TimeSeriesDataFrame) -> str: ts_lengths = data.num_timesteps_per_item() - median_length = int(ts_lengths.median()) + median_length = ts_lengths.median() min_length = ts_lengths.min() max_length = ts_lengths.max() + missing_value_fraction = data[self.target].isna().mean() + if missing_value_fraction > 0: + missing_value_fraction_str = f" (NaN fraction={missing_value_fraction:.1%})" + else: + missing_value_fraction_str = "" return ( - f"{len(data)} rows, {data.num_items} time series. " - f"Median time series length is {median_length} (min={min_length}, max={max_length}). " + f"{len(data)} rows{missing_value_fraction_str}, {data.num_items} time series. " + f"Median time series length is {median_length:.0f} (min={min_length}, max={max_length}). 
" ) def _reduce_num_val_windows_if_necessary( @@ -374,41 +366,45 @@ def _reduce_num_val_windows_if_necessary( ) return new_num_val_windows - def _filter_short_series( + def _filter_useless_train_data( self, train_data: TimeSeriesDataFrame, num_val_windows: int, val_step_size: int, ) -> Tuple[TimeSeriesDataFrame, Optional[TimeSeriesDataFrame]]: - """Remove time series from train_data that are too short for chosen prediction_length and validation settings. + """Remove time series from train_data that either contain all NaNs or are too short for chosen settings. - This method ensures that for each validation fold, all train series have length >= max(prediction_length + 1, 5). + This method ensures that 1) no time series consist of all NaN values and 2) for each validation fold, all train + series have length >= max(prediction_length + 1, 5). - In other words, this method removes from train_data all time series with length less than + In other words, this method removes from train_data all time series with only NaN values or length less than min_train_length + prediction_length + (num_val_windows - 1) * val_step_size """ min_length = self._min_train_length + self.prediction_length + (num_val_windows - 1) * val_step_size - train_lengths = train_data.num_timesteps_per_item() - train_items_to_drop = train_lengths.index[train_lengths < min_length] - if len(train_items_to_drop) > 0: + too_short_items = train_lengths.index[train_lengths < min_length] + + if len(too_short_items) > 0: logger.info( - f"\tRemoving {len(train_items_to_drop)} short time series from train_data. Only series with length " + f"\tRemoving {len(too_short_items)} short time series from train_data. Only series with length " f">= {min_length} will be used for training." ) - filtered_train_data = train_data.query("item_id not in @train_items_to_drop") - if len(filtered_train_data) == 0: - raise ValueError( - f"At least some time series in train_data must have length >= {min_length}. Please provide longer " - f"time series as train_data or reduce prediction_length, num_val_windows, or val_step_size." - ) - logger.info( - f"\tAfter removing short series, train_data has {self._get_dataset_stats(filtered_train_data)}" - ) - else: - filtered_train_data = train_data + train_data = train_data.query("item_id not in @too_short_items") + + all_nan_items = train_data.item_ids[train_data[self.target].isna().groupby(ITEMID, sort=False).all()] + if len(all_nan_items) > 0: + logger.info(f"\tRemoving {len(all_nan_items)} time series consisting of only NaN values from train_data.") + train_data = train_data.query("item_id not in @all_nan_items") - return filtered_train_data + if len(too_short_items) or len(all_nan_items): + logger.info(f"\tAfter filtering, train_data has {self._get_dataset_stats(train_data)}") + + if len(train_data) == 0: + raise ValueError( + f"At least some time series in train_data must have >= {min_length} observations. Please provide " + f"longer time series as train_data or reduce prediction_length, num_val_windows, or val_step_size." 
+ ) + return train_data @apply_presets(TIMESERIES_PRESETS_CONFIGS) def fit( @@ -722,7 +718,7 @@ def fit( raise ValueError("Please set num_val_windows >= 1 or provide custom tuning_data") if not skip_model_selection: - train_data = self._filter_short_series( + train_data = self._filter_useless_train_data( train_data, num_val_windows=num_val_windows, val_step_size=val_step_size ) diff --git a/timeseries/src/autogluon/timeseries/utils/features.py b/timeseries/src/autogluon/timeseries/utils/features.py --- a/timeseries/src/autogluon/timeseries/utils/features.py +++ b/timeseries/src/autogluon/timeseries/utils/features.py @@ -28,9 +28,32 @@ class CovariateMetadata: past_covariates_real: List[str] = field(default_factory=list) past_covariates_cat: List[str] = field(default_factory=list) + @property + def known_covariates(self) -> List[str]: + return self.known_covariates_cat + self.known_covariates_real + + @property + def past_covariates(self) -> List[str]: + return self.past_covariates_cat + self.past_covariates_real + + @property + def covariates(self) -> List[str]: + return self.known_covariates + self.past_covariates + + @property + def covariates_real(self) -> List[str]: + return self.known_covariates_real + self.past_covariates_real + + @property + def covariates_cat(self) -> List[str]: + return self.known_covariates_cat + self.past_covariates_cat + class ContinuousAndCategoricalFeatureGenerator(PipelineFeatureGenerator): - """Generates categorical and continuous features for time series models.""" + """Generates categorical and continuous features for time series models. + + Imputes missing categorical features with the most frequent value in the training set. + """ def __init__(self, verbosity: int = 0, minimum_cat_count=2, float_dtype: str = "float32", **kwargs): generators = [ @@ -62,13 +85,22 @@ def fit_transform(self, X: pd.DataFrame, *args, **kwargs) -> pd.DataFrame: # PipelineFeatureGenerator does not use transform() inside fit_transform(), so we need to override both methods if isinstance(X, TimeSeriesDataFrame): X = pd.DataFrame(X) - return self._convert_numerical_columns_to_float(super().fit_transform(X, *args, **kwargs)) + transformed = self._convert_numerical_columns_to_float(super().fit_transform(X, *args, **kwargs)) + # Ignore the '__dummy__' feature generated by PipelineFeatureGenerator if none of the features are informative + return transformed.drop(columns=["__dummy__"], errors="ignore") class TimeSeriesFeatureGenerator: """Takes care of preprocessing for static_features and past/known covariates. All covariates & static features are converted into either float32 or categorical dtype. + + Missing values in the target column are left as-is but missing values in static features & covariates are imputed. + Imputation logic is as follows: + 1. For all categorical columns (static, past, known), we fill missing values with the mode of the training set. + 2. For real static features, we impute missing values with the median of the training set. + 3. For real covariates (past, known), we ffill + bfill within each time series. If for some time series all + covariate values are missing, we fill them with the median of the training set. 
""" def __init__(self, target: str, known_covariates_names: List[str], float_dtype: str = "float32"): @@ -82,6 +114,8 @@ def __init__(self, target: str, known_covariates_names: List[str], float_dtype: # Cat features with cat_count=1 are fine in static_features since they are repeated for all time steps in a TS self.static_feature_pipeline = ContinuousAndCategoricalFeatureGenerator(minimum_cat_count=1) self.covariate_metadata: CovariateMetadata = None + self._train_covariates_real_median: Optional[pd.Series] = None + self._train_static_real_median: Optional[pd.Series] = None @property def required_column_names(self) -> List[str]: @@ -129,6 +163,7 @@ def fit(self, data: TimeSeriesDataFrame) -> None: logger.info("\tstatic_features:") static_features_cat, static_features_real = self._detect_and_log_column_types(static_features_df) ignored_static_features = data.static_features.columns.difference(self.static_feature_pipeline.features_in) + self._train_static_real_median = data.static_features[static_features_real].median() else: static_features_cat = [] static_features_real = [] @@ -154,6 +189,7 @@ def fit(self, data: TimeSeriesDataFrame) -> None: static_features_cat=static_features_cat, static_features_real=static_features_real, ) + self._train_covariates_real_median = data[self.covariate_metadata.covariates_real].median() self._is_fit = True def transform(self, data: TimeSeriesDataFrame, data_frame_name: str = "data") -> TimeSeriesDataFrame: @@ -180,10 +216,26 @@ def transform(self, data: TimeSeriesDataFrame, data_frame_name: str = "data") -> if data.static_features is None: raise ValueError(f"Provided {data_frame_name} must contain static_features") static_features = self.static_feature_pipeline.transform(data.static_features) + static_real_names = self.covariate_metadata.static_features_real + # Fill missing static_features_real with the median of the training set + if static_real_names and static_features[static_real_names].isna().any(axis=None): + static_features[static_real_names] = static_features[static_real_names].fillna( + self._train_static_real_median + ) else: static_features = None - return TimeSeriesDataFrame(pd.concat(dfs, axis=1), static_features=static_features) + ts_df = TimeSeriesDataFrame(pd.concat(dfs, axis=1), static_features=static_features) + + covariates_names = self.covariate_metadata.covariates + if len(covariates_names) > 0: + # ffill + bfill covariates that have at least some observed values + ts_df[covariates_names] = ts_df[covariates_names].fill_missing_values() + # If for some items covariates consist completely of NaNs, fill them with median of training data + if ts_df[covariates_names].isna().any(axis=None): + ts_df[covariates_names] = ts_df[covariates_names].fillna(self._train_covariates_real_median) + + return ts_df def transform_future_known_covariates( self, known_covariates: Optional[TimeSeriesDataFrame] @@ -194,7 +246,13 @@ def transform_future_known_covariates( self._check_required_columns_are_present( known_covariates, required_column_names=self.known_covariates_names, data_frame_name="known_covariates" ) - return TimeSeriesDataFrame(self.known_covariates_pipeline.transform(known_covariates)) + known_covariates = TimeSeriesDataFrame(self.known_covariates_pipeline.transform(known_covariates)) + # ffill + bfill covariates that have at least some observed values + known_covariates = known_covariates.fill_missing_values() + # If for some items covariates consist completely of NaNs, fill them with median of training data + if 
known_covariates.isna().any(axis=None): + known_covariates = known_covariates.fillna(self._train_covariates_real_median) + return known_covariates else: return None
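The docstring in the feature-generator diff above spells out the imputation rules. Here is a minimal pandas sketch of rules 1 and 3 (illustrative only, not the library code; `item_id` and the column names are made up for the example):

```python
import pandas as pd

df = pd.DataFrame({
    "item_id": ["A", "A", "A", "B", "B"],
    "cat_cov": ["x", None, "x", None, None],
    "real_cov": [1.0, None, 3.0, None, None],
})

# Rule 1: categorical columns are filled with the mode of the training set.
df["cat_cov"] = df["cat_cov"].fillna(df["cat_cov"].mode()[0])

# Rule 3: real covariates are ffilled + bfilled within each time series...
df["real_cov"] = df.groupby("item_id")["real_cov"].transform(lambda s: s.ffill().bfill())
# ...and series that are still all-NaN (item "B") fall back to the training median.
df["real_cov"] = df["real_cov"].fillna(df["real_cov"].median())
```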
diff --git a/timeseries/tests/unittests/common.py b/timeseries/tests/unittests/common.py --- a/timeseries/tests/unittests/common.py +++ b/timeseries/tests/unittests/common.py @@ -101,18 +101,20 @@ def get_data_frame_with_item_index( ) -DUMMY_TS_DATAFRAME = get_data_frame_with_item_index(["10", "A", "2", "1"]) - - -def get_dummy_ts_dataframe_with_missing(): - data = DUMMY_TS_DATAFRAME.copy() - # Completely mask one item + some additional indexes +def mask_entries(data: TimeSeriesDataFrame) -> TimeSeriesDataFrame: + """Replace some values in a TimeSeriesDataFrame with NaNs""" + data = data.copy() + # Mask all but the first entry for item #1 + data.iloc[1 : data.num_timesteps_per_item()[data.item_ids[0]]] = float("nan") + # Completely mask item #2 data.loc[data.item_ids[1]] = float("nan") - data.iloc[[0, 1, 2, 5, 15, 17, 18, 42, 53]] = float("nan") + # Mask random indices for item #3 + nan_idx = [42, 53, 58, 59][: len(data)] + data.iloc[nan_idx] = float("nan") return data -DUMMY_TS_DATAFRAME_WITH_MISSING = get_dummy_ts_dataframe_with_missing() +DUMMY_TS_DATAFRAME = mask_entries(get_data_frame_with_item_index(["10", "A", "2", "1"])) def get_data_frame_with_variable_lengths( @@ -142,7 +144,7 @@ def get_data_frame_with_variable_lengths( ITEM_ID_TO_LENGTH = {"D": 22, "A": 50, "C": 10, "B": 17} -DUMMY_VARIABLE_LENGTH_TS_DATAFRAME = get_data_frame_with_variable_lengths(ITEM_ID_TO_LENGTH) +DUMMY_VARIABLE_LENGTH_TS_DATAFRAME = mask_entries(get_data_frame_with_variable_lengths(ITEM_ID_TO_LENGTH)) def get_static_features(item_ids: List[Union[str, int]], feature_names: List[str]): diff --git a/timeseries/tests/unittests/models/test_local.py b/timeseries/tests/unittests/models/test_local.py --- a/timeseries/tests/unittests/models/test_local.py +++ b/timeseries/tests/unittests/models/test_local.py @@ -86,25 +86,6 @@ def test_when_local_model_saved_then_local_model_args_are_saved(model_class, hyp assert dict_equal_primitive(model._local_model_args, loaded_model._local_model_args) [email protected]("model_class", TESTABLE_MODELS) [email protected]("prediction_length", [1, 3, 10]) -def test_when_local_model_predicts_then_time_index_is_correct(model_class, prediction_length, temp_model_path): - data = DUMMY_VARIABLE_LENGTH_TS_DATAFRAME - model = model_class( - path=temp_model_path, - prediction_length=prediction_length, - hyperparameters=DEFAULT_HYPERPARAMETERS, - freq=data.freq, - ) - model.fit(train_data=data) - predictions = model.predict(data=data) - for item_id in data.item_ids: - cutoff = data.loc[item_id].index[-1] - start = cutoff + pd.tseries.frequencies.to_offset(data.freq) - expected_timestamps = pd.date_range(start, periods=prediction_length, freq=data.freq) - assert (predictions.loc[item_id].index == expected_timestamps).all() - - def get_seasonal_period_from_fitted_local_model(model): if model.name in ["ARIMA", "AutoETS", "AutoARIMA", "AutoCES", "DynamicOptimizedTheta", "ETS", "Theta"]: return model._local_model_args["season_length"] @@ -406,7 +387,7 @@ def test_when_intermittent_models_fit_then_values_are_lower_bounded( @pytest.mark.parametrize("model_class", TESTABLE_MODELS) @pytest.mark.parametrize("prediction_length", [1, 3]) def test_when_local_models_fit_then_quantiles_are_present_and_ranked(model_class, prediction_length, temp_model_path): - data = DUMMY_VARIABLE_LENGTH_TS_DATAFRAME + data = get_data_frame_with_item_index(["B", "A", "X"]) model = model_class( path=temp_model_path, prediction_length=prediction_length, diff --git a/timeseries/tests/unittests/models/test_mlforecast.py 
b/timeseries/tests/unittests/models/test_mlforecast.py --- a/timeseries/tests/unittests/models/test_mlforecast.py +++ b/timeseries/tests/unittests/models/test_mlforecast.py @@ -12,6 +12,7 @@ from ..common import ( DATAFRAME_WITH_COVARIATES, DATAFRAME_WITH_STATIC, + DUMMY_TS_DATAFRAME, DUMMY_VARIABLE_LENGTH_TS_DATAFRAME, get_data_frame_with_variable_lengths, ) @@ -195,7 +196,7 @@ def test_given_some_time_series_are_too_short_then_seasonal_naive_forecast_is_us def test_when_point_forecast_metric_is_used_then_per_item_residuals_are_used_for_prediction( temp_model_path, model_type ): - data = DUMMY_VARIABLE_LENGTH_TS_DATAFRAME.sort_index() + data = get_data_frame_with_variable_lengths({"A": 20, "B": 30, "C": 15}) prediction_length = 5 model = model_type( path=temp_model_path, @@ -235,3 +236,18 @@ def test_when_mlf_model_is_used_then_predictions_have_correct_scale(temp_model_p model.fit(train_data=data) predictions = model.predict(data) assert np.all(np.abs(predictions.values - value) < value) + + [email protected]("model_type", TESTABLE_MODELS) +def test_given_train_data_has_nans_when_fit_called_then_nan_rows_removed_from_train_df(temp_model_path, model_type): + data = DUMMY_TS_DATAFRAME.copy() + model = model_type( + path=temp_model_path, + freq=data.freq, + eval_metric="WAPE", + prediction_length=3, + hyperparameters={"differences": []}, + ) + model.fit(train_data=data) + train_df, val_df = model._generate_train_val_dfs(model.preprocess(data, is_train=True)) + assert len(train_df) + len(val_df) == len(data.dropna()) diff --git a/timeseries/tests/unittests/models/test_models.py b/timeseries/tests/unittests/models/test_models.py --- a/timeseries/tests/unittests/models/test_models.py +++ b/timeseries/tests/unittests/models/test_models.py @@ -126,7 +126,7 @@ def test_when_score_called_then_model_receives_truncated_data(model_class, predi (call_df,) = patch_method.call_args[0] for j in DUMMY_TS_DATAFRAME.item_ids: - assert np.allclose(call_df.loc[j], DUMMY_TS_DATAFRAME.loc[j][:-prediction_length]) + assert np.allclose(call_df.loc[j], DUMMY_TS_DATAFRAME.loc[j][:-prediction_length], equal_nan=True) @pytest.mark.parametrize("model_class", TESTABLE_MODELS) @@ -363,18 +363,19 @@ def test_when_get_info_is_called_then_all_keys_are_present(model_class, predicti @pytest.mark.parametrize("model_class", TESTABLE_MODELS) def test_when_median_not_in_quantile_levels_then_median_is_present_in_raw_predictions(model_class): + data = get_data_frame_with_item_index(["B", "A", "X", "C"]) model = model_class( prediction_length=3, quantile_levels=[0.1, 0.15], - freq=DUMMY_TS_DATAFRAME.freq, + freq=data.freq, hyperparameters=DUMMY_HYPERPARAMETERS, ) if isinstance(model, MultiWindowBacktestingModel): # Median is present in the predictions of the base model, but not in the MultiWindowBacktestingModel wrapper pytest.skip() - model.fit(train_data=DUMMY_TS_DATAFRAME) + model.fit(train_data=data) - raw_predictions = model._predict(DUMMY_TS_DATAFRAME) + raw_predictions = model._predict(data) assert "0.5" in raw_predictions.columns @@ -475,3 +476,91 @@ def test_given_searcher_when_ray_backend_used_in_hpo_then_correct_searcher_used( "bayes": "HyperOpt", "random": "BasicVariant", }.get(searcher) in ray_searcher_class_name + + [email protected]("model_class", TESTABLE_MODELS) +def test_when_data_contains_missing_values_then_model_can_fit_and_predict(temp_model_path, model_class): + data = DUMMY_TS_DATAFRAME + prediction_length = 5 + model = model_class( + freq=data.freq, + path=temp_model_path, + 
prediction_length=prediction_length, + hyperparameters=DUMMY_HYPERPARAMETERS, + ) + model.fit( + train_data=data, + val_data=None if isinstance(model, MultiWindowBacktestingModel) else data, + ) + predictions = model.predict(data) + assert not predictions.isna().any(axis=None) and all(predictions.item_ids == data.item_ids) + + [email protected]("model_class", TESTABLE_MODELS) +def test_when_fit_and_predict_called_then_train_val_and_test_data_is_preprocessed(temp_model_path, model_class): + train_data = DUMMY_TS_DATAFRAME.copy() + model = model_class(freq=train_data.freq, path=temp_model_path, hyperparameters=DUMMY_HYPERPARAMETERS) + preprocessed_data = train_data + 5.0 + if model._get_tags()["can_use_val_data"]: + expected_val_data = preprocessed_data + else: + expected_val_data = train_data + with ( + mock.patch.object(model, "preprocess") as mock_preprocess, + mock.patch.object(model, "_fit") as mock_fit, + mock.patch.object(model, "_predict") as mock_predict, + ): + mock_preprocess.return_value = preprocessed_data + model.fit(train_data=train_data, val_data=train_data) + fit_kwargs = mock_fit.call_args[1] + model_train_data = fit_kwargs["train_data"] + model_val_data = fit_kwargs["val_data"] + assert model_train_data.equals(preprocessed_data) + assert model_val_data.equals(expected_val_data) + + model.predict(train_data) + model_predict_data = mock_predict.call_args[1]["data"] + assert model_predict_data.equals(preprocessed_data) + + [email protected]("model_class", TESTABLE_MODELS) +def test_given_model_doesnt_support_nan_when_model_fits_then_nans_are_filled(temp_model_path, model_class): + data = get_data_frame_with_item_index(["B", "A", "C", "X"]) + data.iloc[[0, 1, 5, 10, 23, 26, 33, 60]] = float("nan") + prediction_length = 5 + model = model_class( + freq=data.freq, + path=temp_model_path, + prediction_length=prediction_length, + hyperparameters=DUMMY_HYPERPARAMETERS, + ) + + with mock.patch.object(model, "_fit") as mock_fit: + model.fit( + train_data=data, + val_data=None if isinstance(model, MultiWindowBacktestingModel) else data, + ) + fit_kwargs = mock_fit.call_args[1] + + model_allows_nan = model._get_tags()["allow_nan"] + input_contains_nan = fit_kwargs["train_data"].isna().any(axis=None) + assert model_allows_nan == input_contains_nan + + +EXPECTED_MODEL_TAGS = [ + "allow_nan", + "can_refit_full", + "can_use_val_data", + # Tabular tags - not used by time series models + "valid_oof", + "handles_text", +] + + [email protected]("model_class", TESTABLE_MODELS) +def test_when_model_created_then_model_has_all_required_tags(temp_model_path, model_class): + model = model_class(path=temp_model_path) + model_tags = model._get_tags() + for tag in EXPECTED_MODEL_TAGS: + assert tag in model_tags + assert len(model_tags) == len(EXPECTED_MODEL_TAGS) diff --git a/timeseries/tests/unittests/test_features.py b/timeseries/tests/unittests/test_features.py --- a/timeseries/tests/unittests/test_features.py +++ b/timeseries/tests/unittests/test_features.py @@ -137,3 +137,35 @@ def test_when_bool_columns_provided_then_they_are_converted_to_cat(): assert isinstance(data_transformed["known_bool"].dtype, pd.CategoricalDtype) assert isinstance(data_transformed["past_bool"].dtype, pd.CategoricalDtype) assert isinstance(data_transformed.static_features["static_bool"].dtype, pd.CategoricalDtype) + + [email protected]("known_covariates_names", [[], ["real_1", "cat_1"], ["real_1", "real_2", "cat_1", "cat_2"]]) +def 
test_when_covariates_contain_missing_values_then_they_are_filled_during_transform(known_covariates_names): + prediction_length = 5 + data_full = get_data_frame_with_covariates(covariates_cat=["cat_1", "cat_2"], covariates_real=["real_1", "real_2"]) + data_full.iloc[[0, 1, 8, 9, 10, 12, 15, -2, -1]] = float("nan") + data_full.loc[data_full.item_ids[1]] = float("nan") + + data, known_covariates = data_full.get_model_inputs_for_scoring(prediction_length, known_covariates_names) + feat_generator = TimeSeriesFeatureGenerator(target="target", known_covariates_names=known_covariates_names) + + data_transformed = feat_generator.fit_transform(data) + assert not data_transformed[feat_generator.covariate_metadata.covariates].isna().any(axis=None) + assert data_transformed["target"].isna().any() + + known_covariates_transformed = feat_generator.transform_future_known_covariates(known_covariates) + if known_covariates_names == []: + assert known_covariates_transformed is None + else: + assert not known_covariates_transformed[known_covariates_names].isna().any(axis=None) + + +def test_when_static_features_contain_missing_values_then_they_are_filled_during_transform(): + data = get_data_frame_with_covariates( + static_features_cat=["cat_1", "cat_2"], static_features_real=["real_1", "real_2"] + ) + data.static_features.iloc[[0], [1, 2]] = float("nan") + feat_generator = TimeSeriesFeatureGenerator(target="target", known_covariates_names=[]) + + data_transformed = feat_generator.fit_transform(data) + assert not data_transformed.static_features.isna().any(axis=None) diff --git a/timeseries/tests/unittests/test_learner.py b/timeseries/tests/unittests/test_learner.py --- a/timeseries/tests/unittests/test_learner.py +++ b/timeseries/tests/unittests/test_learner.py @@ -1,4 +1,5 @@ """Unit tests for learners""" + import shutil import sys import tempfile @@ -6,6 +7,7 @@ from unittest import mock import numpy as np +import pandas as pd import pytest from autogluon.common import space @@ -15,6 +17,7 @@ from autogluon.timeseries.utils.forecast import get_forecast_horizon_index_single_time_series from .common import DUMMY_TS_DATAFRAME, get_data_frame_with_variable_lengths, get_static_features +from .test_features import get_data_frame_with_covariates TEST_HYPERPARAMETER_SETTINGS = [ {"SimpleFeedForward": {"epochs": 1, "num_batches_per_epoch": 1}}, @@ -380,3 +383,61 @@ def test_when_train_data_has_static_or_dynamic_feat_then_leaderboard_works( leaderboard = learner.leaderboard(data=pred_data) assert len(leaderboard) > 0 assert ("score_test" in leaderboard.columns) == pred_data_present + + +def test_when_features_are_all_nan_and_learner_is_loaded_then_mode_or_median_are_imputed(temp_model_path): + covariates_cat = ["known_cat", "past_cat"] + covariates_real = ["known_real", "past_real"] + data = get_data_frame_with_covariates( + covariates_cat=covariates_cat, + covariates_real=covariates_real, + static_features_cat=["static_cat"], + static_features_real=["static_real"], + ) + known_covariates_names = ["known_cat", "known_real"] + prediction_length = 3 + learner = TimeSeriesLearner( + path_context=temp_model_path, + known_covariates_names=known_covariates_names, + prediction_length=prediction_length, + ) + learner.fit(data, hyperparameters={"Naive": {}}) + data_transformed = learner.feature_generator.transform(data) + learner.save() + del learner + + loaded_learner = TimeSeriesLearner.load(temp_model_path) + data_with_nan = data.copy() + for col in data_with_nan.columns: + if col != "target": + data_with_nan[col] = 
float("nan") + for col in data_with_nan.static_features.columns: + data_with_nan.static_features[col] = float("nan") + data_with_nan, known_covariates_with_nan = data_with_nan.get_model_inputs_for_scoring( + prediction_length, known_covariates_names + ) + with mock.patch("autogluon.timeseries.trainer.AbstractTimeSeriesTrainer.predict") as trainer_predict: + loaded_learner.predict(data_with_nan, known_covariates=known_covariates_with_nan) + trainer_predict_call_args = trainer_predict.call_args[1] + imputed_data = trainer_predict_call_args["data"] + imputed_known_covariates = trainer_predict_call_args["known_covariates"] + imputed_static = imputed_data.static_features + + def get_mode(series: pd.Series): + # series.mode() can result in ties. We copy tiebreaking logic from CategoryFeatureGenerator + return series.value_counts().sort_values().index[-1] + + for col in covariates_cat: + column_mode_train = get_mode(data_transformed[col]) + assert (imputed_data[col] == column_mode_train).all() + if col in known_covariates_names: + assert (imputed_known_covariates[col] == column_mode_train).all() + + for col in covariates_real: + column_median_train = data_transformed[col].median() + assert np.allclose(imputed_data[col], column_median_train) + if col in known_covariates_names: + assert np.allclose(imputed_known_covariates[col], column_median_train) + + assert (imputed_static["static_cat"] == get_mode(data_transformed.static_features["static_cat"])).all() + assert np.allclose(imputed_static["static_real"], data_transformed.static_features["static_real"].median()) diff --git a/timeseries/tests/unittests/test_metrics.py b/timeseries/tests/unittests/test_metrics.py --- a/timeseries/tests/unittests/test_metrics.py +++ b/timeseries/tests/unittests/test_metrics.py @@ -24,12 +24,7 @@ from autogluon.timeseries.metrics.utils import _in_sample_abs_seasonal_error, _in_sample_squared_seasonal_error from autogluon.timeseries.models.gluonts.abstract_gluonts import AbstractGluonTSModel -from .common import ( - DUMMY_TS_DATAFRAME, - DUMMY_TS_DATAFRAME_WITH_MISSING, - get_data_frame_with_item_index, - get_prediction_for_df, -) +from .common import DUMMY_TS_DATAFRAME, get_data_frame_with_item_index, get_prediction_for_df pytestmark = pytest.mark.filterwarnings("ignore") @@ -172,7 +167,7 @@ def test_given_missing_target_values_when_metric_evaluated_then_output_equal_to_ check_gluonts_parity( ag_metric_name, gts_metric, - data=DUMMY_TS_DATAFRAME_WITH_MISSING, + data=DUMMY_TS_DATAFRAME, model=deepar_trained, ) @@ -180,7 +175,7 @@ def test_given_missing_target_values_when_metric_evaluated_then_output_equal_to_ @pytest.mark.parametrize("metric_cls", AVAILABLE_METRICS.values()) def test_given_missing_target_values_when_metric_evaluated_then_metric_is_not_nan(metric_cls): prediction_length = 5 - train, test = DUMMY_TS_DATAFRAME_WITH_MISSING.train_test_split(prediction_length) + train, test = DUMMY_TS_DATAFRAME.train_test_split(prediction_length) predictions = get_prediction_for_df(train, prediction_length) score = metric_cls()(data=test, predictions=predictions, prediction_length=prediction_length) assert not pd.isna(score) @@ -189,7 +184,7 @@ def test_given_missing_target_values_when_metric_evaluated_then_metric_is_not_na @pytest.mark.parametrize("metric_cls", AVAILABLE_METRICS.values()) def test_given_predictions_contain_nan_when_metric_evaluated_then_exception_is_raised(metric_cls): prediction_length = 5 - train, test = DUMMY_TS_DATAFRAME_WITH_MISSING.train_test_split(prediction_length) + train, test = 
DUMMY_TS_DATAFRAME.train_test_split(prediction_length) predictions = get_prediction_for_df(train, prediction_length) predictions.iloc[[3, 5]] = float("nan") with pytest.raises(AssertionError, match="Predictions contain NaN values"): @@ -312,7 +307,7 @@ def test_given_metric_is_optimized_by_median_when_model_predicts_then_median_is_ def test_when_perfect_predictions_passed_to_metric_then_score_equals_optimum(metric_name): prediction_length = 5 eval_metric = check_get_evaluation_metric(metric_name) - data = DUMMY_TS_DATAFRAME_WITH_MISSING.copy() + data = DUMMY_TS_DATAFRAME.copy() predictions = data.slice_by_timestep(-prediction_length, None).rename(columns={"target": "mean"}).fillna(0.0) for q in ["0.1", "0.4", "0.9"]: predictions[q] = predictions["mean"] @@ -324,7 +319,7 @@ def test_when_perfect_predictions_passed_to_metric_then_score_equals_optimum(met def test_when_better_predictions_passed_to_metric_then_score_improves(metric_name): prediction_length = 5 eval_metric = check_get_evaluation_metric(metric_name) - data = DUMMY_TS_DATAFRAME_WITH_MISSING.copy() + data = DUMMY_TS_DATAFRAME.copy() predictions = data.slice_by_timestep(-prediction_length, None).rename(columns={"target": "mean"}).fillna(0.0) for q in ["0.1", "0.4", "0.9"]: predictions[q] = predictions["mean"] diff --git a/timeseries/tests/unittests/test_predictor.py b/timeseries/tests/unittests/test_predictor.py --- a/timeseries/tests/unittests/test_predictor.py +++ b/timeseries/tests/unittests/test_predictor.py @@ -24,6 +24,7 @@ from autogluon.timeseries.predictor import TimeSeriesPredictor from .common import ( + DATAFRAME_WITH_COVARIATES, DUMMY_TS_DATAFRAME, PREDICTIONS_FOR_DUMMY_TS_DATAFRAME, CustomMetric, @@ -217,8 +218,8 @@ def test_given_hyperparameters_when_predictor_called_and_loaded_back_then_all_mo @pytest.mark.parametrize( "hyperparameters", [ - {"ETS": {"maxiter": 1}, "SimpleFeedForward": {"epochs": 1}}, - {"ETS": {"maxiter": 1}, "SimpleFeedForward": {"epochs": space.Int(1, 3)}}, + {"Naive": {"maxiter": 1}, "SimpleFeedForward": {"epochs": 1}}, + {"Naive": {"maxiter": 1}, "SimpleFeedForward": {"epochs": space.Int(1, 3)}}, ], ) def test_given_hp_spaces_and_custom_target_when_predictor_called_predictor_can_predict( @@ -474,7 +475,7 @@ def test_when_predictor_is_loaded_then_info_works(temp_model_path): def test_when_train_data_contains_nans_then_predictor_can_fit(temp_model_path): predictor = TimeSeriesPredictor(path=temp_model_path) - df = DUMMY_TS_DATAFRAME.copy() + df = DATAFRAME_WITH_COVARIATES.copy() df.iloc[5] = np.nan predictor.fit( df, @@ -486,23 +487,42 @@ def test_when_train_data_contains_nans_then_predictor_can_fit(temp_model_path): def test_when_prediction_data_contains_nans_then_predictor_can_predict(temp_model_path): predictor = TimeSeriesPredictor(path=temp_model_path) predictor.fit(DUMMY_TS_DATAFRAME, hyperparameters={"Naive": {}}) - df = DUMMY_TS_DATAFRAME.copy() + df = DATAFRAME_WITH_COVARIATES.copy() df.iloc[5] = np.nan predictions = predictor.predict(df) assert isinstance(predictions, TimeSeriesDataFrame) assert not np.any(np.isnan(predictions)) -def test_when_some_time_series_contain_only_nans_then_exception_is_raised(temp_model_path): +def test_when_some_train_time_series_contain_only_nans_then_they_are_removed_from_train_data(temp_model_path): predictor = TimeSeriesPredictor(path=temp_model_path) - df = TimeSeriesDataFrame.from_iterable_dataset( + train_data = TimeSeriesDataFrame.from_iterable_dataset( [ - {"target": [float(5)] * 10, "start": pd.Period("2020-01-01", "D")}, {"target": [float("nan")] * 
10, "start": pd.Period("2020-01-01", "D")}, + {"target": [float(5)] * 10, "start": pd.Period("2020-01-01", "D")}, ] ) - with pytest.raises(ValueError, match="consist completely of NaN values"): - predictor._check_and_prepare_data_frame(df) + with mock.patch("autogluon.timeseries.learner.TimeSeriesLearner.fit") as mock_learner_fit: + predictor.fit(train_data) + learner_train_data = mock_learner_fit.call_args[1]["train_data"] + assert all(learner_train_data.item_ids == [1]) + + +def test_when_all_train_time_series_contain_only_nans_then_exception_is_raised(temp_model_path): + predictor = TimeSeriesPredictor(path=temp_model_path) + train_data = DUMMY_TS_DATAFRAME.copy() + train_data["target"] = float("nan") + with pytest.raises(ValueError, match="At least some time series in train"): + predictor.fit(train_data) + + +def test_when_all_nan_data_passed_to_predict_then_predictor_can_predict(temp_model_path): + predictor = TimeSeriesPredictor(path=temp_model_path, prediction_length=3) + predictor.fit(DUMMY_TS_DATAFRAME, hyperparameters=DUMMY_HYPERPARAMETERS) + data = DUMMY_TS_DATAFRAME.copy() + data["target"] = float("nan") + predictions = predictor.predict(data) + assert not predictions.isna().any(axis=None) and all(predictions.item_ids == data.item_ids) @pytest.mark.parametrize("method", ["evaluate", "leaderboard"]) @@ -536,7 +556,6 @@ def test_given_data_is_in_dataframe_format_then_predictor_works(temp_model_path) @pytest.mark.parametrize("path_format", [str, Path]) def test_given_data_is_in_str_format_then_predictor_works(temp_model_path, tmp_path, path_format): df = pd.DataFrame(DUMMY_TS_DATAFRAME.reset_index()) - tmp_path_subdir = tmp_path / str(uuid4())[:4] data_path = path_format(str(tmp_path_subdir)) @@ -907,7 +926,7 @@ def test_given_only_short_series_in_train_data_when_fit_called_then_exception_is "short_series_3": 2, } data = get_data_frame_with_variable_lengths(item_id_to_length, freq="H") - with pytest.raises(ValueError, match="At least some time series in train\_data must have length"): + with pytest.raises(ValueError, match="Please provide longer time series as train"): predictor.fit(data, num_val_windows=num_val_windows, val_step_size=val_step_size) @@ -925,7 +944,7 @@ def test_given_only_short_series_in_train_data_then_exception_is_raised( "short_series_3": 2, } data = get_data_frame_with_variable_lengths(item_id_to_length, freq="H") - with pytest.raises(ValueError, match="At least some time series in train\_data must have length"): + with pytest.raises(ValueError, match="Please provide longer time series as train"): predictor.fit(data, num_val_windows=num_val_windows, hyperparameters=TEST_HYPERPARAMETER_SETTINGS[0]) diff --git a/timeseries/tests/unittests/test_splitter.py b/timeseries/tests/unittests/test_splitter.py --- a/timeseries/tests/unittests/test_splitter.py +++ b/timeseries/tests/unittests/test_splitter.py @@ -13,7 +13,7 @@ def test_when_splitter_splits_then_cached_freq_is_preserved(): def test_when_splitter_splits_then_underlying_data_is_not_copied(): splitter = ExpandingWindowSplitter(prediction_length=3, num_val_windows=2) - original_df = DUMMY_VARIABLE_LENGTH_TS_DATAFRAME.copy() + original_df = DATAFRAME_WITH_STATIC.copy() for train_fold, val_fold in splitter.split(original_df): assert train_fold._is_view assert val_fold._is_view or val_fold.values.data == original_df.values.data
[timeseries] Add first-class support for missing values

Currently, `TimeSeriesPredictor` deals with missing values in the data by first imputing them via forward-/backward-filling, and then training all models as if there were no missing values. This strategy may lead to poor accuracy on datasets with a large portion of missing values, since the training data will include regions of constant values produced by forward-filling. A better alternative is to keep the missing values represented by `nan` in the data and let the models handle them (see the sketch after this list). This requires the following modifications to the code:

- [x] Update metric implementations to handle missing values in the target (#3966)
- [x] Update preprocessing logic in `TimeSeriesPredictor._check_and_prepare_data_frame`
- [x] Make sure that all models can handle missing values. That is, all models can train normally and produce forecasts with no NaN values, even if the training data contains NaNs.
  - [x] GluonTS models (DeepAR, TFT, PatchTST, DLinear)
  - [ ] StatsForecast models (AutoETS, AutoARIMA, Theta, intermittent demand models)
  - [x] Baseline models (Naive, SeasonalNaive, Average, SeasonalAverage, Zero)
  - [x] MLForecast models (DirectTabular, RecursiveTabular)
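As a minimal illustration of the motivation (plain pandas, not AutoGluon's preprocessing code; the series values and gap positions are made up), forward-/backward-filling fabricates flat segments, while a NaN-aware metric simply skips the unobserved steps:

```python
import numpy as np
import pandas as pd

# Toy series with a long gap of missing observations.
y = pd.Series([1.0, 2.0] + [np.nan] * 8 + [3.0, 4.0])

# Imputation-based strategy: ffill + bfill fabricates a flat run of 2.0s
# (8 filled values plus the 1 observed one) that models would fit as signal.
imputed = y.ffill().bfill()
assert (imputed == 2.0).sum() == 9

# NaN-aware strategy: keep the gap and exclude it when scoring a forecast.
forecast = pd.Series(2.5, index=y.index)  # stand-in for any model output
mae_imputed = (imputed - forecast).abs().mean()      # distorted by the flat run
mae_observed = (y - forecast).abs().dropna().mean()  # averages only observed steps
print(mae_imputed, mae_observed)
```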
2024-03-21T14:48:57Z
[]
[]
autogluon/autogluon
4171
autogluon__autogluon-4171
[ "4133" ]
7e30e5fb15d39d1e5338d38ad234b408721346d7
diff --git a/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py b/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py --- a/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +++ b/timeseries/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py @@ -88,6 +88,27 @@ def __init__( self._train_target_median: Optional[float] = None self._non_boolean_real_covariates: List[str] = [] + @property + def tabular_predictor_path(self) -> str: + return os.path.join(self.path, "tabular_predictor") + + def save(self, path: str = None, verbose: bool = True) -> str: + assert "mean" in self._mlf.models_, "TabularPredictor must be trained before saving" + tabular_predictor = self._mlf.models_["mean"].predictor + self._mlf.models_["mean"].predictor = None + save_path = super().save(path=path, verbose=verbose) + self._mlf.models_["mean"].predictor = tabular_predictor + return save_path + + @classmethod + def load( + cls, path: str, reset_paths: bool = True, load_oof: bool = False, verbose: bool = True + ) -> "AbstractTimeSeriesModel": + model = super().load(path=path, reset_paths=reset_paths, load_oof=load_oof, verbose=verbose) + assert "mean" in model._mlf.models_, "Loaded model doesn't have a trained TabularPredictor" + model._mlf.models_["mean"].predictor = TabularPredictor.load(model.tabular_predictor_path) + return model + def preprocess(self, data: TimeSeriesDataFrame, is_train: bool = False, **kwargs) -> Any: if is_train: # All-NaN series are removed; partially-NaN series in train_data are handled inside _generate_train_val_dfs @@ -295,7 +316,7 @@ def _fit( estimator = TabularEstimator( predictor_init_kwargs={ - "path": os.path.join(self.path, "tabular_predictor"), + "path": self.tabular_predictor_path, "verbosity": verbosity - 2, "label": MLF_TARGET, **self._get_extra_tabular_init_kwargs(),
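The patch above works around the fact that a pickled TabularPredictor keeps absolute paths: the fitted predictor is detached before the wrapper is pickled and persisted separately through its own path-aware save/load logic. A minimal sketch of this detach-and-reattach pattern, with hypothetical names (`ModelWrapper`, `sub_predictor`) that are not AutoGluon's actual classes:

```python
import os
import pickle


class ModelWrapper:
    """Toy stand-in for the MLForecast model wrapper (hypothetical class)."""

    def __init__(self, path: str):
        self.path = path
        self.sub_predictor = None  # object that must be saved via its own, path-aware API

    def save(self) -> None:
        # Detach the sub-predictor so its absolute paths are not pickled
        # into the wrapper, then reattach it after dumping.
        sub, self.sub_predictor = self.sub_predictor, None
        os.makedirs(self.path, exist_ok=True)
        with open(os.path.join(self.path, "wrapper.pkl"), "wb") as f:
            pickle.dump(self, f)
        self.sub_predictor = sub
        # In the real patch, the detached predictor lives under
        # `<path>/tabular_predictor` and is saved by TabularPredictor itself.

    @classmethod
    def load(cls, path: str) -> "ModelWrapper":
        with open(os.path.join(path, "wrapper.pkl"), "rb") as f:
            wrapper = pickle.load(f)
        wrapper.path = path  # point at the new location, wherever the dir was moved
        # wrapper.sub_predictor = TabularPredictor.load(wrapper.tabular_predictor_path)
        return wrapper
```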
diff --git a/timeseries/tests/unittests/models/test_mlforecast.py b/timeseries/tests/unittests/models/test_mlforecast.py --- a/timeseries/tests/unittests/models/test_mlforecast.py +++ b/timeseries/tests/unittests/models/test_mlforecast.py @@ -1,3 +1,6 @@ +import os +import shutil +import tempfile from unittest import mock import numpy as np @@ -252,3 +255,25 @@ def test_given_train_data_has_nans_when_fit_called_then_nan_rows_removed_from_tr model.fit(train_data=data) train_df, val_df = model._generate_train_val_dfs(model.preprocess(data, is_train=True)) assert len(train_df) + len(val_df) == len(data.dropna()) + + [email protected]("model_type", TESTABLE_MODELS) [email protected]("eval_metric", ["WAPE", "WQL"]) +def test_when_trained_model_moved_to_different_folder_then_loaded_model_can_predict(model_type, eval_metric): + data = DUMMY_TS_DATAFRAME.copy().sort_index() + old_model_dir = tempfile.mkdtemp() + model = model_type( + path=old_model_dir, + freq=data.freq, + eval_metric=eval_metric, + quantile_levels=[0.1, 0.5, 0.9], + prediction_length=3, + hyperparameters={"differences": []}, + ) + model.fit(train_data=data) + model.save() + new_model_dir = tempfile.mkdtemp() + shutil.move(model.path, new_model_dir) + loaded_model = model_type.load(os.path.join(new_model_dir, model.name)) + predictions = loaded_model.predict(data) + assert isinstance(predictions, TimeSeriesDataFrame)
[BUG] TimeSeriesPredictor loading from different path than originally saved in fails for tabular predictors

**Bug Report Checklist**

<!-- Please ensure at least one of the following to help the developers troubleshoot the problem: -->

- [x] I provided code that demonstrates a minimal reproducible example. <!-- Ideal, especially via source install -->
- [ ] I confirmed bug exists on the latest mainline of AutoGluon via source install. <!-- Preferred -->
- [X] I confirmed bug exists on the latest stable version of AutoGluon. <!-- Unnecessary if prior items are checked -->

**Describe the bug**
When training a TimeSeriesPredictor, we copy the resulting model into blob storage and then download it from blob storage into a local directory at prediction time. All the non-tabular models work well, but when the best model is RecursiveTabular or DirectTabular, the TimeSeriesPredictor fails because it is unable to find the pkl file:

```
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/site-packages/autogluon/timeseries/trainer/abstract_trainer.py", line 944, in get_model_pred_dict
    model_pred_dict[model_name] = self._predict_model(
  File "/usr/local/lib/python3.10/site-packages/autogluon/timeseries/trainer/abstract_trainer.py", line 874, in _predict_model
    return model.predict(data, known_covariates=known_covariates)
  File "/usr/local/lib/python3.10/site-packages/autogluon/timeseries/models/abstract/abstract_timeseries_model.py", line 298, in predict
    predictions = self._predict(data=data, known_covariates=known_covariates, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/autogluon/timeseries/models/multi_window/multi_window_model.py", line 177, in _predict
    return self.most_recent_model.predict(data, known_covariates=known_covariates, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/autogluon/timeseries/models/abstract/abstract_timeseries_model.py", line 298, in predict
    predictions = self._predict(data=data, known_covariates=known_covariates, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/autogluon/timeseries/models/autogluon_tabular/mlforecast.py", line 467, in _predict
    raw_predictions = self._mlf.models_["mean"].predict(df)
  File "/usr/local/lib/python3.10/site-packages/autogluon/timeseries/models/autogluon_tabular/mlforecast.py", line 55, in predict
    return self.predictor.predict(X).values
  File "/usr/local/lib/python3.10/site-packages/autogluon/tabular/predictor/predictor.py", line 1931, in predict
    return self._learner.predict(X=data, model=model, as_pandas=as_pandas, transform_features=transform_features, decision_threshold=decision_threshold)
  File "/usr/local/lib/python3.10/site-packages/autogluon/tabular/learner/abstract_learner.py", line 208, in predict
    y_pred_proba = self.predict_proba(
  File "/usr/local/lib/python3.10/site-packages/autogluon/tabular/learner/abstract_learner.py", line 189, in predict_proba
    y_pred_proba = self.load_trainer().predict_proba(X, model=model)
  File "/usr/local/lib/python3.10/site-packages/autogluon/core/trainer/abstract_trainer.py", line 773, in predict_proba
    return self._predict_proba_model(X, model, cascade=cascade)
  File "/usr/local/lib/python3.10/site-packages/autogluon/core/trainer/abstract_trainer.py", line 2525, in _predict_proba_model
    return self.get_pred_proba_from_model(model=model, X=X, model_pred_proba_dict=model_pred_proba_dict, cascade=cascade)
  File "/usr/local/lib/python3.10/site-packages/autogluon/core/trainer/abstract_trainer.py", line 787, in get_pred_proba_from_model
    model_pred_proba_dict = self.get_model_pred_proba_dict(X=X, models=models, model_pred_proba_dict=model_pred_proba_dict, cascade=cascade)
  File "/usr/local/lib/python3.10/site-packages/autogluon/core/trainer/abstract_trainer.py", line 1022, in get_model_pred_proba_dict
    model = self.load_model(model_name=model_name)
  File "/usr/local/lib/python3.10/site-packages/autogluon/core/trainer/abstract_trainer.py", line 1651, in load_model
    return model_type.load(path=os.path.join(self.path, path), reset_paths=self.reset_paths)
  File "/usr/local/lib/python3.10/site-packages/autogluon/core/models/abstract/abstract_model.py", line 1096, in load
    model = load_pkl.load(path=file_path, verbose=verbose)
  File "/usr/local/lib/python3.10/site-packages/autogluon/common/loaders/load_pkl.py", line 43, in load
    with compression_fn_map[compression_fn]["open"](validated_path, "rb", **compression_fn_kwargs) as fin:
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/tmp4katc9my/models/DirectTabular/W2/tabular_predictor/models/LightGBM/model.pkl'
```

The temporary path it is trying to load from is the path in which the model was trained on another cloud instance. The way we persist these models is by uploading the full model directory, then downloading it into a new path and calling TimeSeriesPredictor.load(new_path).

**Steps to reproduce**

1. Train a TimeSeriesPredictor with `DirectTabular` enabled
2. Move the model directory to a different location, ensuring the original path no longer exists
3. Load from the new directory
4. Predict using `DirectTabular` (or the `WeightedEnsemble`)

The Predictor will try to load the model from the old directory. This issue only arises with the Tabular models (a minimal sketch of these steps follows this report).

**Installed Versions**

```
INSTALLED VERSIONS
------------------
date                : 2024-04-24
time                : 12:29:19.525968
python              : 3.10.13.final.0
OS                  : Linux
OS-release          : 5.15.0-1052-azure
Version             : #60-Ubuntu SMP Mon Nov 6 10:08:16 UTC 2023
machine             : x86_64
processor           : x86_64
num_cores           : 32
cpu_ram_mb          : 257926.4453125
cuda version        : None
num_gpus            : 0
gpu_ram_mb          : []
avail_disk_size_mb  : 33082
accelerate          : 0.21.0
autogluon           : None
autogluon.common    : 1.1.0
autogluon.core      : 1.1.0
autogluon.features  : 1.1.0
autogluon.tabular   : 1.1.0
autogluon.timeseries: 1.1.0
boto3               : 1.34.90
catboost            : 1.2.5
fastai              : None
gluonts             : 0.14.3
hyperopt            : 0.2.7
imodels             : None
joblib              : 1.4.0
lightgbm            : 3.3.5
lightning           : 2.1.4
matplotlib          : 3.8.4
mlforecast          : 0.10.0
networkx            : 3.3
numpy               : 1.25.2
onnxruntime-gpu     : None
optimum             : None
optimum-intel       : None
orjson              : 3.10.1
pandas              : 2.0.3
psutil              : 5.9.8
pytorch-lightning   : 2.1.4
ray                 : 2.10.0
requests            : 2.31.0
scikit-learn        : 1.4.0
scikit-learn-intelex: None
scipy               : 1.12.0
setuptools          : 69.5.1
skl2onnx            : None
statsforecast       : 1.4.0
tabpfn              : None
tensorboard         : 2.16.2
torch               : 2.1.2
tqdm                : 4.66.2
transformers        : 4.38.2
utilsforecast       : 0.0.10
vowpalwabbit        : None
xgboost             : 1.7.6
```

Somewhere an absolute path is being used instead of a relative path. Have you seen this issue before? I see that TabularPredictor has some clone-for-deployment methods, and that autogluon.cloud has functionality to persist models to S3. Is there a recommended way to do this on our own (using Azure Blob Storage)? Thank you.
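A minimal sketch of the reproduction steps above, assuming an existing `TimeSeriesDataFrame` named `train_data`; the directory names are illustrative. On an affected version (1.1.0 per the report), the final call fails with the FileNotFoundError shown above:

```python
import shutil

from autogluon.timeseries import TimeSeriesPredictor

# 1. Train a predictor that includes a tabular model.
predictor = TimeSeriesPredictor(path="old_dir", prediction_length=3)
predictor.fit(train_data, hyperparameters={"DirectTabular": {}})

# 2. Move the model directory so that the original path no longer exists.
shutil.move("old_dir", "new_dir")

# 3. Load from the new directory and 4. predict with the tabular model.
loaded = TimeSeriesPredictor.load("new_dir")
loaded.predict(train_data)  # FileNotFoundError: pkl is looked up under "old_dir"
```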
2024-05-04T17:35:28Z
[]
[]
autogluon/autogluon
4202
autogluon__autogluon-4202
[ "4183" ]
7a394dc6951c062b3f975526118616ae108c42b4
diff --git a/timeseries/src/autogluon/timeseries/trainer/abstract_trainer.py b/timeseries/src/autogluon/timeseries/trainer/abstract_trainer.py --- a/timeseries/src/autogluon/timeseries/trainer/abstract_trainer.py +++ b/timeseries/src/autogluon/timeseries/trainer/abstract_trainer.py @@ -292,6 +292,10 @@ def __init__( self.cache_predictions = cache_predictions self.hpo_results = {} + if self._cached_predictions_path.exists(): + logger.debug(f"Removing existing cached predictions file {self._cached_predictions_path}") + self._cached_predictions_path.unlink() + def save_train_data(self, data: TimeSeriesDataFrame, verbose: bool = True) -> None: path = os.path.join(self.path_data, "train.pkl") save_pkl.save(path=path, object=data, verbose=verbose)
diff --git a/timeseries/tests/unittests/test_predictor.py b/timeseries/tests/unittests/test_predictor.py --- a/timeseries/tests/unittests/test_predictor.py +++ b/timeseries/tests/unittests/test_predictor.py @@ -1727,3 +1727,27 @@ def test_given_predictor_takes_known_only_when_feature_importance_called_with_im assert np.allclose(importance, 0, atol=1e-8) else: assert np.isfinite(importance) + + +def test_when_predictor_saved_to_same_directory_then_leaderboard_works(temp_model_path): + data = DUMMY_TS_DATAFRAME + old_predictor = TimeSeriesPredictor(path=temp_model_path).fit(data, hyperparameters={"Naive": {}}) + old_predictor.leaderboard(data) + + new_predictor = TimeSeriesPredictor(path=temp_model_path).fit(data, hyperparameters={"Average": {}}) + assert len(new_predictor.leaderboard(data)) == 1 + + +def test_when_predictor_saved_to_same_directory_and_loaded_then_number_of_models_matches(temp_model_path): + data = DUMMY_TS_DATAFRAME + old_predictor = TimeSeriesPredictor(path=temp_model_path).fit(data, hyperparameters={"Naive": {}, "Average": {}}) + old_predictor.leaderboard(data) + + hyperparameters = {"SeasonalNaive": {}, "SeasonalAverage": {}} + new_predictor = TimeSeriesPredictor(path=temp_model_path).fit(data, hyperparameters=hyperparameters) + loaded_predictor = TimeSeriesPredictor.load(temp_model_path) + assert ( + set(new_predictor.model_names()) + == set(loaded_predictor.model_names()) + == set(hyperparameters).union({"WeightedEnsemble"}) + )
networkx.exception.NetworkXError: The node ETS is not in the digraph.

Traceback (most recent call last):
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/networkx/classes/digraph.py", line 927, in predecessors
    return iter(self._pred[n])
KeyError: 'ETS'

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/Users/leo/web3/LLM/langchain/mlts/auto_gluon.py", line 32, in <module>
    forecast_entry_df = predictor.predict(train_data)
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/autogluon/timeseries/predictor.py", line 845, in predict
    predictions = self._learner.predict(
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/autogluon/timeseries/learner.py", line 185, in predict
    return self.load_trainer().predict(
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/autogluon/timeseries/trainer/abstract_trainer.py", line 892, in predict
    model_pred_dict = self.get_model_pred_dict(
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/autogluon/timeseries/trainer/abstract_trainer.py", line 1199, in get_model_pred_dict
    pred_time_dict_total = self._get_total_pred_time_from_marginal(pred_time_dict_marginal)
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/autogluon/timeseries/trainer/abstract_trainer.py", line 1211, in _get_total_pred_time_from_marginal
    for base_model in self.get_minimum_model_set(model_name):
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/autogluon/timeseries/trainer/abstract_trainer.py", line 174, in get_minimum_model_set
    minimum_model_set = list(nx.bfs_tree(self.model_graph, model, reverse=True))
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/breadth_first_search.py", line 235, in bfs_tree
    T.add_edges_from(edges_gen)
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/networkx/classes/digraph.py", line 768, in add_edges_from
    for e in ebunch_to_add:
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/breadth_first_search.py", line 170, in bfs_edges
    yield from generic_bfs_edges(G, source, successors, depth_limit, sort_neighbors)
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/breadth_first_search.py", line 77, in generic_bfs_edges
    queue = deque([(source, depth_limit, neighbors(source))])
  File "/Users/leo/web3/LLM/langchain/venv/lib/python3.10/site-packages/networkx/classes/digraph.py", line 929, in predecessors
    raise NetworkXError(f"The node {n} is not in the digraph.") from err
networkx.exception.NetworkXError: The node ETS is not in the digraph.
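A plausible reproduction, consistent with the fix and tests above: refitting into a directory that already holds another predictor's cached predictions leaves a stale cache referencing models (such as 'ETS') that no longer exist in the new model graph. A minimal sketch under that assumption (`train_data` is assumed to be an existing `TimeSeriesDataFrame`; the path is illustrative):

```python
from autogluon.timeseries import TimeSeriesPredictor

# First run: train into `model_dir` and populate the prediction cache.
predictor = TimeSeriesPredictor(path="model_dir").fit(train_data, hyperparameters={"ETS": {}})
predictor.leaderboard(train_data)

# Second run into the SAME directory with a different model set. Before the
# fix, the surviving cache still referenced the now-missing "ETS" node.
predictor = TimeSeriesPredictor(path="model_dir").fit(train_data, hyperparameters={"Naive": {}})
predictor.predict(train_data)  # NetworkXError: The node ETS is not in the digraph.
```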
Hello @LeonTing1010, Can you please provide additional details, such as a reproducible code example and the AutoGluon version used?

Getting the same error but for DeepAR model. Here are the logs:

```
Warning: path already exists! This predictor may overwrite an existing predictor! path="output_model_2"
Beginning AutoGluon training...
AutoGluon will save models to 'output_model_2'
=================== System Info ===================
AutoGluon Version: 1.1.0
Python Version: 3.11.0
Operating System: Linux
Platform Machine: x86_64
Platform Version: #18~22.04.1-Ubuntu SMP Fri Apr 5 17:44:33 UTC 2024
CPU Count: 8
GPU Count: 0
Memory Avail: 24.42 GB / 31.05 GB (78.6%)
Disk Space Avail: 6.77 GB / 48.28 GB (14.0%)
WARNING: Available disk space is low and there is a risk that AutoGluon will run out of disk during fit, causing an exception.
We recommend a minimum available disk space of 10 GB, and large datasets may require more.
===================================================
Setting presets to: best_quality
Fitting with arguments:
{'enable_ensemble': True, 'eval_metric': MAPE, 'freq': 'W-MON', 'hyperparameters': 'default', 'known_covariates_names': [], 'num_val_windows': 10, 'prediction_length': 16, 'quantile_levels': [0.1, 0.5, 0.9], 'random_seed': 123, 'refit_every_n_windows': 1, 'refit_full': True, 'skip_model_selection': False, 'target': 'ship_qty', 'verbosity': 2}
Provided train_data has 373779 rows, 1071 time series. Median time series length is 349 (min=349, max=349).
Provided data contains following columns:
    target: 'ship_qty'
    past_covariates:
        categorical: ['category', 'market_code']
        continuous (float): ['market_size_sales', 'stock_days', 'product_consumption', 'weighted_avg_product_price', 'price_gap_1', 'covid', ...]
To learn how to fix incorrectly inferred types, please see documentation for TimeSeriesPredictor.fit
AutoGluon will gauge predictive performance using evaluation metric: 'MAPE'
    This metric's sign has been flipped to adhere to being higher_is_better. The metric score can be multiplied by -1 to get the metric value.
===================================================
Starting training. Start time is 2024-05-10 11:50:25
Models that will be trained: ['SeasonalNaive', 'RecursiveTabular', 'DirectTabular', 'CrostonSBA', 'NPTS', 'DynamicOptimizedTheta', 'AutoETS', 'AutoARIMA', 'Chronos[base]', 'TemporalFusionTransformer', 'DeepAR', 'PatchTST']
Training timeseries model SeasonalNaive.
    -1.9552 = Validation score (-MAPE)
    17.19 s = Training runtime
    1.04 s = Validation (prediction) runtime
Training timeseries model RecursiveTabular.
    -1.7324 = Validation score (-MAPE)
    3158.88 s = Training runtime
    1.06 s = Validation (prediction) runtime
Training timeseries model DirectTabular.
    -2.2581 = Validation score (-MAPE)
    54.85 s = Training runtime
    1.15 s = Validation (prediction) runtime
Training timeseries model CrostonSBA.
    -1.9592 = Validation score (-MAPE)
    32.32 s = Training runtime
    1.63 s = Validation (prediction) runtime
Training timeseries model NPTS.
    -1.5177 = Validation score (-MAPE)
    124.04 s = Training runtime
    16.45 s = Validation (prediction) runtime
Training timeseries model DynamicOptimizedTheta.
    -2.1870 = Validation score (-MAPE)
    193.77 s = Training runtime
    32.40 s = Validation (prediction) runtime
Training timeseries model AutoETS.
    -1.8336 = Validation score (-MAPE)
    72.40 s = Training runtime
    4.96 s = Validation (prediction) runtime
Training timeseries model AutoARIMA.
    Warning: AutoARIMA/W4 failed for 12 time series (1.1%). Fallback model SeasonalNaive was used for these time series.
    -1.4207 = Validation score (-MAPE)
    673.52 s = Training runtime
    94.51 s = Validation (prediction) runtime
Training timeseries model Chronos[base].
    Warning: Exception caused Chronos[base] to fail during training... Skipping this model.
    Chronos[base]/W0 requires a GPU to run, but no GPU was detected. Please make sure that you are using a computer with a CUDA-compatible GPU and `import torch; torch.cuda.is_available()` returns `True`.
Training timeseries model TemporalFusionTransformer.
    Warning: Exception caused TemporalFusionTransformer to fail during training... Skipping this model.
    Could not deserialize ATN with version  (expected 4).
Training timeseries model DeepAR.
    Warning: Exception caused DeepAR to fail during training... Skipping this model.
    Could not deserialize ATN with version  (expected 4).
Training timeseries model PatchTST.
    Warning: Exception caused PatchTST to fail during training... Skipping this model.
    Could not deserialize ATN with version  (expected 4).
Fitting simple weighted ensemble.
    Ensemble weights: {'AutoARIMA': 0.59, 'NPTS': 0.41}
    -1.3364 = Validation score (-MAPE)
    44.60 s = Training runtime
    110.96 s = Validation (prediction) runtime
Training complete. Models trained: ['SeasonalNaive', 'RecursiveTabular', 'DirectTabular', 'CrostonSBA', 'NPTS', 'DynamicOptimizedTheta', 'AutoETS', 'AutoARIMA', 'WeightedEnsemble']
Total runtime: 4539.08 s
Best model: WeightedEnsemble
Best model score: -1.3364
WARNING: refit_full functionality for TimeSeriesPredictor is experimental and is not yet supported by all models.
Refitting models via `refit_full` using all of the data (combined train and validation)...
Models trained in this way will have the suffix '_FULL' and have NaN validation score.
This process is not bound by time_limit, but should take less time than the original `fit` call.
Fitting model: SeasonalNaive_FULL | Skipping fit via cloning parent ...
Fitting model: RecursiveTabular_FULL
    341.75 s = Training runtime
Fitting model: DirectTabular_FULL
    4.26 s = Training runtime
Fitting model: CrostonSBA_FULL | Skipping fit via cloning parent ...
Fitting model: NPTS_FULL | Skipping fit via cloning parent ...
Fitting model: DynamicOptimizedTheta_FULL | Skipping fit via cloning parent ...
Fitting model: AutoETS_FULL | Skipping fit via cloning parent ...
Fitting model: AutoARIMA_FULL | Skipping fit via cloning parent ...
Fitting model: WeightedEnsemble_FULL | Skipping fit via cloning parent ...
Refit complete. Models trained: ['SeasonalNaive_FULL', 'RecursiveTabular_FULL', 'DirectTabular_FULL', 'CrostonSBA_FULL', 'NPTS_FULL', 'DynamicOptimizedTheta_FULL', 'AutoETS_FULL', 'AutoARIMA_FULL', 'WeightedEnsemble_FULL']
Total runtime: 346.19 s
Updated best model to 'WeightedEnsemble_FULL' (Previously 'WeightedEnsemble'). AutoGluon will default to using 'WeightedEnsemble_FULL' for predict().
Model not specified in predict, will default to the model with the best validation score: WeightedEnsemble_FULL
```
Model not specified in predict, will default to the model with the best validation score: WeightedEnsemble_FULL` And here is the error when predicting: `--------------------------------------------------------------------------- KeyError Traceback (most recent call last) File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:936](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:936), in DiGraph.predecessors(self, n) [935](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:935) try: --> [936](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:936) return iter(self._pred[n]) [937](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:937) except KeyError as err: KeyError: 'DeepAR_FULL' The above exception was the direct cause of the following exception: NetworkXError Traceback (most recent call last) Cell In[10], [line 27](vscode-notebook-cell:?execution_count=10&line=27) [10](vscode-notebook-cell:?execution_count=10&line=10) predictor = TimeSeriesPredictor( [11](vscode-notebook-cell:?execution_count=10&line=11) freq=frequency, [12](vscode-notebook-cell:?execution_count=10&line=12) prediction_length=16, (...) [16](vscode-notebook-cell:?execution_count=10&line=16) quantile_levels=[0.1, 0.5, 0.9] [17](vscode-notebook-cell:?execution_count=10&line=17) ) [19](vscode-notebook-cell:?execution_count=10&line=19) predictor.fit( [20](vscode-notebook-cell:?execution_count=10&line=20) train_data, [21](vscode-notebook-cell:?execution_count=10&line=21) presets=train_preset_type, (...) 
[24](vscode-notebook-cell:?execution_count=10&line=24) [25](vscode-notebook-cell:?execution_count=10&line=25) ) ---> [27](vscode-notebook-cell:?execution_count=10&line=27) predictions = predictor.predict( [28](vscode-notebook-cell:?execution_count=10&line=28) data=train_data, [29](vscode-notebook-cell:?execution_count=10&line=29) ) [31](vscode-notebook-cell:?execution_count=10&line=31) predictions_1 = predictions.reset_index() [34](vscode-notebook-cell:?execution_count=10&line=34) model_name = "AutoGluon" File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:845](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:845), in TimeSeriesPredictor.predict(self, data, known_covariates, model, use_cache, random_seed) [843](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:843) if known_covariates is not None: [844](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:844) known_covariates = self._to_data_frame(known_covariates) --> [845](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:845) predictions = self._learner.predict( [846](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:846) data, [847](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:847) known_covariates=known_covariates, [848](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:848) model=model, [849](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:849) use_cache=use_cache, [850](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:850) random_seed=random_seed, [851](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:851) ) [852](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/predictor.py:852) return predictions.reindex(original_item_id_order, 
level=ITEMID) File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:185](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:185), in TimeSeriesLearner.predict(self, data, known_covariates, model, use_cache, random_seed, **kwargs) [183](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:183) known_covariates = self.feature_generator.transform_future_known_covariates(known_covariates) [184](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:184) known_covariates = self._align_covariates_with_forecast_index(known_covariates=known_covariates, data=data) --> [185](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:185) return self.load_trainer().predict( [186](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:186) data=data, [187](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:187) known_covariates=known_covariates, [188](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:188) model=model, [189](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:189) use_cache=use_cache, [190](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:190) random_seed=random_seed, [191](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:191) **kwargs, [192](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/learner.py:192) ) File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:892](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:892), in AbstractTimeSeriesTrainer.predict(self, data, known_covariates, model, use_cache, 
random_seed, **kwargs) [882](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:882) def predict( [883](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:883) self, [884](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:884) data: TimeSeriesDataFrame, (...) [889](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:889) **kwargs, [890](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:890) ) -> TimeSeriesDataFrame: [891](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:891) model_name = self._get_model_for_prediction(model) --> [892](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:892) model_pred_dict = self.get_model_pred_dict( [893](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:893) model_names=[model_name], [894](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:894) data=data, [895](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:895) known_covariates=known_covariates, [896](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:896) use_cache=use_cache, [897](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:897) random_seed=random_seed, [898](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:898) ) 
[899](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:899) return model_pred_dict[model_name] File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1199](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1199), in AbstractTimeSeriesTrainer.get_model_pred_dict(self, model_names, data, known_covariates, record_pred_time, raise_exception_if_failed, use_cache, random_seed) [1195](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1195) if self.cache_predictions and use_cache: [1196](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1196) self._save_cached_pred_dicts( [1197](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1197) dataset_hash, model_pred_dict=model_pred_dict, pred_time_dict=pred_time_dict_marginal [1198](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1198) ) -> [1199](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1199) pred_time_dict_total = self._get_total_pred_time_from_marginal(pred_time_dict_marginal) [1201](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1201) final_model_pred_dict = {model_name: model_pred_dict[model_name] for model_name in model_names} [1202](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1202) final_pred_time_dict_total = {model_name: pred_time_dict_total[model_name] for model_name in model_names} File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1211](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1211), in AbstractTimeSeriesTrainer._get_total_pred_time_from_marginal(self, pred_time_dict_marginal) 
[1209](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1209) pred_time_dict_total = defaultdict(float) [1210](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1210) for model_name in pred_time_dict_marginal.keys(): -> [1211](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1211) for base_model in self.get_minimum_model_set(model_name): [1212](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1212) if pred_time_dict_marginal[base_model] is not None: [1213](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:1213) pred_time_dict_total[model_name] += pred_time_dict_marginal[base_model] File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:174](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:174), in SimpleAbstractTrainer.get_minimum_model_set(self, model, include_self) [172](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:172) if not isinstance(model, str): [173](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:173) model = model.name --> [174](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:174) minimum_model_set = list(nx.bfs_tree(self.model_graph, model, reverse=True)) [175](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:175) if not include_self: [176](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/autogluon/timeseries/trainer/abstract_trainer.py:176) minimum_model_set = [m for m in minimum_model_set if m != model] File <class 'networkx.utils.decorators.argmap'> compilation 4:3, in argmap_bfs_tree_1(G, source, reverse, depth_limit, sort_neighbors, 
backend, **backend_kwargs) [ compilation 4:1'>1](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/%3Cclass) import bz2 [ compilation 4:2'>2](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/%3Cclass) import collections ----> [ compilation 4:3'>3](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/%3Cclass) import gzip [ compilation 4:4'>4](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/%3Cclass) import inspect [ compilation 4:5'>5](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/%3Cclass) import itertools File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:633](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:633), in _dispatchable.__call__(self, backend, *args, **kwargs) [628](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:628) """Returns the result of the original function, or the backend function if [629](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:629) the backend is specified and that backend implements `func`.""" [631](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:631) if not backends: [632](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:632) # Fast path if no backends are installed --> [633](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:633) return self.orig_func(*args, **kwargs) [635](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:635) # Use `backend_name` in this function instead of `backend` [636](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/utils/backends.py:636) backend_name = backend File 
[~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:285](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:285), in bfs_tree(G, source, reverse, depth_limit, sort_neighbors) [277](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:277) T.add_node(source) [278](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:278) edges_gen = bfs_edges( [279](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:279) G, [280](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:280) source, (...) [283](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:283) sort_neighbors=sort_neighbors, [284](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:284) ) --> [285](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:285) T.add_edges_from(edges_gen) [286](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:286) return T File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:774](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:774), in DiGraph.add_edges_from(self, ebunch_to_add, **attr) [719](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:719) def add_edges_from(self, ebunch_to_add, **attr): [720](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:720) """Add all the edges in ebunch_to_add. 
[721](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:721) [722](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:722) Parameters (...) [772](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:772) >>> G.add_edges_from(list((5, n) for n in G.nodes)) [773](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:773) """ --> [774](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:774) for e in ebunch_to_add: [775](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:775) ne = len(e) [776](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:776) if ne == 3: File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:218](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:218), in bfs_edges(G, source, reverse, depth_limit, sort_neighbors) [214](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:214) yield from generic_bfs_edges( [215](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:215) G, source, lambda node: iter(sort_neighbors(successors(node))), depth_limit [216](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:216) ) [217](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:217) else: --> [218](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:218) yield from 
generic_bfs_edges(G, source, successors, depth_limit) File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:117](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:117), in generic_bfs_edges(G, source, neighbors, depth_limit, sort_neighbors) [115](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:115) n = len(G) [116](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:116) depth = 0 --> [117](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:117) next_parents_children = [(source, neighbors(source))] [118](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:118) while next_parents_children and depth < depth_limit: [119](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/breadth_first_search.py:119) this_parents_children = next_parents_children File [~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:938](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:938), in DiGraph.predecessors(self, n) [936](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:936) return iter(self._pred[n]) [937](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:937) except KeyError as err: --> [938](https://vscode-remote+ssh-002dremote-002bhackathon-005fmachine-005f01.vscode-resource.vscode-cdn.net/home/ubuntu/vamshi/ml-platform/~/vamshi/ml-platform/.venv/lib/python3.11/site-packages/networkx/classes/digraph.py:938) raise NetworkXError(f"The node {n} is not in the digraph.") from err NetworkXError: The node DeepAR_FULL is not in the digraph.` @vamshik113 according to the log, you are saving the predictor to a directory that already contains a trained predictor. Can you please try removing the directory or providing a new `path` to the predictor and see if the problem persists? @shchur Even if they are saving to an existing predictor location, shouldn't the logic work correctly without issue? Only way I'd foresee an error is if 1. 
The old predictor.pkl / trainer.pkl / learner.pkl isn't overwritten, 2. The new model files do not overwrite the old ones, 3. We have logic that looks for the existance of files to determine what to do next, which introduces bugs when re-using the same save location as an old run. @shchur Yes, figured it out that the issue is due to the existing directory with already trained predictors. After deleting, the issue was resolved.
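For anyone hitting the same `NetworkXError`, the reliable workaround that emerged from this thread is to never fit into a directory that already holds an older predictor. A minimal sketch (hedged: `frequency`, `train_data`, and `train_preset_type` stand in for the reporter's own variables, and the timestamped path is just one way to guarantee a fresh directory):

```python
import shutil
from datetime import datetime

from autogluon.timeseries import TimeSeriesPredictor

# Option 1: give every run its own directory, so no stale trainer.pkl /
# model graph from an earlier fit (e.g. one containing DeepAR_FULL) is reused.
path = f"ag_models/run-{datetime.now():%Y%m%d_%H%M%S}"

# Option 2: reuse a fixed path, but wipe it before fitting.
# shutil.rmtree("ag_models/my_run", ignore_errors=True)

predictor = TimeSeriesPredictor(
    freq=frequency,                 # assumed defined, as in the report
    prediction_length=16,
    path=path,
    quantile_levels=[0.1, 0.5, 0.9],
)
predictor.fit(train_data, presets=train_preset_type)
predictions = predictor.predict(train_data)
```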
2024-05-15T19:28:25Z
[]
[]
autogluon/autogluon
4203
autogluon__autogluon-4203
[ "4161" ]
7a394dc6951c062b3f975526118616ae108c42b4
diff --git a/tabular/src/autogluon/tabular/predictor/predictor.py b/tabular/src/autogluon/tabular/predictor/predictor.py --- a/tabular/src/autogluon/tabular/predictor/predictor.py +++ b/tabular/src/autogluon/tabular/predictor/predictor.py @@ -214,7 +214,7 @@ class TabularPredictor(TabularPredictorDeprecatedMixin): Dataset = TabularDataset predictor_file_name = "predictor.pkl" - _predictor_version_file_name = "__version__" + _predictor_version_file_name = "version.txt" _predictor_metadata_file_name = "metadata.json" _predictor_log_file_name = "predictor_log.txt" @@ -406,7 +406,7 @@ def fit( num_cpus: int | str = "auto", num_gpus: int | str = "auto", **kwargs, - ): + ) -> "TabularPredictor": """ Fit models to predict a column of a data table (label) based on the other columns (features). @@ -4147,24 +4147,44 @@ def _set_post_fit_vars(self, learner: AbstractTabularLearner = None): self._trainer: AbstractTrainer = self._learner.load_trainer() # Trainer object @classmethod - def _load_version_file(cls, path) -> str: + def _load_version_file(cls, path: str) -> str: + """ + Loads the version file that is part of the saved predictor artifact. + The version file contains a string matching `predictor._learner.version`. + + Parameters + ---------- + path: str + The path that would be used to load the predictor via `predictor.load(path)` + + Returns + ------- + The version of AutoGluon used to fit the predictor, as a string. + + """ version_file_path = os.path.join(path, cls._predictor_version_file_name) - version = load_str.load(path=version_file_path) + try: + version = load_str.load(path=version_file_path) + except: + # Loads the old version file used in `autogluon.tabular<=1.1.0`, named `__version__`. + # This file name was changed because Kaggle does not allow uploading files named `__version__`. + version_file_path = os.path.join(path, "__version__") + version = load_str.load(path=version_file_path) return version @classmethod - def _load_metadata_file(cls, path: str, silent=True): + def _load_metadata_file(cls, path: str, silent: bool = True): metadata_file_path = os.path.join(path, cls._predictor_metadata_file_name) return load_json.load(path=metadata_file_path, verbose=not silent) - def _save_version_file(self, silent=False): + def _save_version_file(self, silent: bool = False): from ..version import __version__ version_file_contents = f"{__version__}" version_file_path = os.path.join(self.path, self._predictor_version_file_name) save_str.save(path=version_file_path, data=version_file_contents, verbose=not silent) - def _save_metadata_file(self, silent=False): + def _save_metadata_file(self, silent: bool = False): """ Save metadata json file to disk containing information such as python version, autogluon version, installed packages, operating system, etc. @@ -4177,7 +4197,7 @@ def _save_metadata_file(self, silent=False): if not silent: logger.log(15, f"Saving {metadata_file_path}") - def save(self, silent=False): + def save(self, silent: bool = False): """ Save this Predictor to file in directory specified by this Predictor's `path`. Note that :meth:`TabularPredictor.fit` already saves the predictor object automatically @@ -4206,7 +4226,7 @@ def save(self, silent=False): logger.log(20, f'TabularPredictor saved. To load, use: predictor = TabularPredictor.load("{self.path}")') @classmethod - def _load(cls, path: str): + def _load(cls, path: str) -> "TabularPredictor": """ Inner load method, called in `load`. 
""" @@ -4216,7 +4236,14 @@ def _load(cls, path: str): return predictor @classmethod - def load(cls, path: str, verbosity: int = None, require_version_match: bool = True, require_py_version_match: bool = True, check_packages: bool = False): + def load( + cls, + path: str, + verbosity: int = None, + require_version_match: bool = True, + require_py_version_match: bool = True, + check_packages: bool = False, + ) -> "TabularPredictor": """ Load a TabularPredictor object previously produced by `fit()` from file and returns this object. It is highly recommended the predictor be loaded with the exact AutoGluon version it was fit with. @@ -4240,6 +4267,15 @@ def load(cls, path: str, verbosity: int = None, require_version_match: bool = Tr check_packages : bool, default = False If True, checks package versions of the loaded predictor against the package versions of the current environment. Warnings will be logged for each mismatch of package version. + + Returns + ------- + predictor : TabularPredictor + + Examples + -------- + >>> predictor = TabularPredictor.load(path_to_predictor) + """ if verbosity is not None: set_logger_verbosity(verbosity) # Reset logging after load (may be in new Python session) @@ -4259,7 +4295,7 @@ def load(cls, path: str, verbosity: int = None, require_version_match: bool = Tr except: logger.warning( f'WARNING: Could not find version file at "{os.path.join(path, cls._predictor_version_file_name)}".\n' - f"This means that the predictor was fit in a version `<=0.3.1`." + f"This means that the predictor was fit in an AutoGluon version `<=0.3.1`." ) version_saved = None @@ -4326,8 +4362,8 @@ def load_log(cls, predictor_path: str = None, log_file_path: Optional[str] = Non If you specified a `log_file_path` while initializing the predictor, you should use `log_file_path` to load the log file instead. At least one of `predictor_path` or `log_file_path` must to be specified - Return - ------ + Returns + ------- List[str] A list containing lines of the log file """ @@ -4721,7 +4757,7 @@ def _sanitize_stack_args( # TODO: Add .delete() method to easily clean-up clones? # Would need to be careful that user doesn't delete important things accidentally. # TODO: Add .save_zip() and load_zip() methods to pack and unpack artifacts into a single file to simplify deployment code? - def clone(self, path: str, *, return_clone: bool = False, dirs_exist_ok: bool = False): + def clone(self, path: str, *, return_clone: bool = False, dirs_exist_ok: bool = False) -> str | "TabularPredictor": """ Clone the predictor and all of its artifacts to a new location on local disk. This is ideal for use-cases where saving a snapshot of the predictor is desired before performing @@ -4753,7 +4789,7 @@ def clone(self, path: str, *, return_clone: bool = False, dirs_exist_ok: bool = ) return self.__class__.load(path=path_clone) if return_clone else path_clone - def clone_for_deployment(self, path: str, *, model: str = "best", return_clone: bool = False, dirs_exist_ok: bool = False): + def clone_for_deployment(self, path: str, *, model: str = "best", return_clone: bool = False, dirs_exist_ok: bool = False) -> str | "TabularPredictor": """ Clone the predictor and all of its artifacts to a new location on local disk, then delete the clones artifacts unnecessary during prediction. 
diff --git a/timeseries/src/autogluon/timeseries/predictor.py b/timeseries/src/autogluon/timeseries/predictor.py --- a/timeseries/src/autogluon/timeseries/predictor.py +++ b/timeseries/src/autogluon/timeseries/predictor.py @@ -146,7 +146,7 @@ class TimeSeriesPredictor(TimeSeriesPredictorDeprecatedMixin): """ predictor_file_name = "predictor.pkl" - _predictor_version_file_name = "__version__" + _predictor_version_file_name = "version.txt" _predictor_log_file_name = "predictor_log.txt" def __init__( @@ -1041,8 +1041,27 @@ def feature_importance( @classmethod def _load_version_file(cls, path: str) -> str: + """ + Loads the version file that is part of the saved predictor artifact. + + Parameters + ---------- + path: str + The path that would be used to load the predictor via `predictor.load(path)` + + Returns + ------- + The version of AutoGluon used to fit the predictor, as a string. + + """ version_file_path = os.path.join(path, cls._predictor_version_file_name) - version = load_str.load(path=version_file_path) + try: + version = load_str.load(path=version_file_path) + except: + # Loads the old version file used in `autogluon.timeseries<=1.1.0`, named `__version__`. + # This file name was changed because Kaggle does not allow uploading files named `__version__`. + version_file_path = os.path.join(path, "__version__") + version = load_str.load(path=version_file_path) return version @classmethod @@ -1077,7 +1096,7 @@ def load(cls, path: Union[str, Path], require_version_match: bool = True) -> "Ti except: logger.warning( f'WARNING: Could not find version file at "{os.path.join(path, cls._predictor_version_file_name)}".\n' - f"This means that the predictor was fit in a version `<=0.7.0`." + f"This means that the predictor was fit in an AutoGluon version `<=0.7.0`." ) version_saved = "Unknown (Likely <=0.7.0)"
diff --git a/tabular/tests/unittests/test_tabular.py b/tabular/tests/unittests/test_tabular.py --- a/tabular/tests/unittests/test_tabular.py +++ b/tabular/tests/unittests/test_tabular.py @@ -35,7 +35,7 @@ from autogluon.common.utils.simulation_utils import convert_simulation_artifacts_to_tabular_predictions_dict from autogluon.core.constants import BINARY, MULTICLASS, PROBLEM_TYPES_CLASSIFICATION, QUANTILE, REGRESSION from autogluon.core.utils import download, unzip -from autogluon.tabular import TabularDataset, TabularPredictor +from autogluon.tabular import TabularDataset, TabularPredictor, __version__ from autogluon.tabular.configs.hyperparameter_configs import get_hyperparameter_config PARALLEL_LOCAL_BAGGING = "parallel_local" @@ -149,6 +149,10 @@ def test_advanced_functionality(): shutil.rmtree(savedir, ignore_errors=True) # Delete AutoGluon output directory to ensure previous runs' information has been removed. savedir_predictor_original = savedir + "predictor/" predictor: TabularPredictor = TabularPredictor(label=label, path=savedir_predictor_original).fit(train_data) + + version_in_file = predictor._load_version_file(path=predictor.path) + assert version_in_file == __version__ + leaderboard = predictor.leaderboard(data=test_data) # test metric_error leaderboard
__version__ file not uploadable to Kaggle

[User Report](https://www.kaggle.com/competitions/playground-series-s4e5/discussion/499495#2789917)

When uploading a trained AutoGluon TabularPredictor artifact to Kaggle, the `__version__` file in the metadata cannot be uploaded, because Kaggle, for unknown reasons, does not allow this specific file name. This results in a confusing warning message when a user then tries to load the predictor via `TabularPredictor.load`:

```
WARNING: Could not find version file at "/kaggle/input/test-ag/ag-20230301_033436/__version__".
This means that the predictor was fit in a version `<=0.3.1`.
```

To resolve, we can rename the file from `__version__` to `version.txt`, or some equivalent.

- [x] Ensure backwards compatibility: check for `__version__` if `version.txt` doesn't exist.
- [x] Check if the same problem exists for TimeSeries and Multimodal: yes, fixed in #4203 for TimeSeries and Tabular. Multimodal does not check the version on load.
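The backwards-compatibility item above boils down to a read-with-fallback pattern: prefer the new `version.txt`, and fall back to the legacy `__version__` written by `autogluon<=1.1.0`. A minimal standalone sketch of that pattern (hedged: an illustration, not the exact `_load_version_file` helper in the patch above):

```python
import os


def load_predictor_version(path: str) -> str:
    """Read the AutoGluon version string saved with a predictor artifact,
    preferring the new file name and falling back to the legacy one."""
    for file_name in ("version.txt", "__version__"):
        version_file = os.path.join(path, file_name)
        if os.path.exists(version_file):
            with open(version_file) as f:
                return f.read().strip()
    raise FileNotFoundError(f"No version file found under {path!r}")
```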
2024-05-15T23:12:49Z
[]
[]
mindsdb/mindsdb
86
mindsdb__mindsdb-86
[ "84" ]
3ee4e40a2e16a4e16b43df4a95e5163d105e22f7
diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -127,6 +127,12 @@ def get_column_data_type(self, data): # assume that the type is the one with the most prevelant type_dist for data_type in type_dist: + # If any of the members are text, use that data type, since otherwise the model will crash when casting + if data_type == DATA_TYPES.TEXT: + pass + curr_data_type = DATA_TYPES.TEXT + print(curr_data_type) + break if type_dist[data_type] > max_data_type: curr_data_type = data_type max_data_type = type_dist[data_type] @@ -298,6 +304,7 @@ def _compute_z_score(self, stats, columns, col_name): if stats[col_name][KEYS.DATA_TYPE] != DATA_TYPES.NUMERIC: return {} + print(col_name, len(columns[col_name])) z_scores = list(map(abs,(st.zscore(columns[col_name])))) threshold = 3 z_score_outlier_indexes = [i for i in range(len(z_scores)) if z_scores[i] > threshold] @@ -521,6 +528,7 @@ def run(self): column_count[column] += 1 stats = {} + col_data_dict = {} for i, col_name in enumerate(non_null_data): col_data = non_null_data[col_name] # all rows in just one column full_col_data = all_sampled_data[col_name] @@ -556,7 +564,6 @@ def run(self): if value != '' and value != '\r' and value != '\n': newData.append(value) - col_data = [clean_float(i) for i in newData if str(i) not in ['', str(None), str(False), str(np.nan), 'NaN', 'nan', 'NA']] y, x = np.histogram(col_data, 50, density=False) @@ -669,15 +676,15 @@ def run(self): stats[col_name]['column'] = col_name stats[col_name]['empty_cells'] = empty_count[col_name] stats[col_name]['empty_percentage'] = empty_count[col_name] * 100 / column_count[col_name] - + col_data_dict[col_name] = col_data for i, col_name in enumerate(all_sampled_data): stats[col_name].update(self._compute_duplicates_score(stats, all_sampled_data, col_name)) stats[col_name].update(self._compute_empty_cells_score(stats, all_sampled_data, col_name)) stats[col_name].update(self._compute_clf_based_correlation_score(stats, all_sampled_data, col_name)) stats[col_name].update(self._compute_data_type_dist_score(stats, all_sampled_data, col_name)) - stats[col_name].update(self._compute_z_score(stats, all_sampled_data, col_name)) - stats[col_name].update(self._compute_lof_score(stats, all_sampled_data, col_name)) + stats[col_name].update(self._compute_z_score(stats, col_data_dict, col_name)) + stats[col_name].update(self._compute_lof_score(stats, col_data_dict, col_name)) stats[col_name].update(self._compute_similariy_score(stats, all_sampled_data, col_name)) stats[col_name].update(self._compute_value_distribution_score(stats, all_sampled_data, col_name))
diff --git a/integration_testing/run_a_file.py b/integration_testing/run_a_file.py --- a/integration_testing/run_a_file.py +++ b/integration_testing/run_a_file.py @@ -1,18 +1,7 @@ -import mindsdb +from mindsdb import * - -mdb = mindsdb.MindsDB(send_logs=False) - -mdb.learn( - from_data='marvel_wiki.csv', - predict='FIRST_APPEARANCE', - model_name='run_a_file' +MindsDB().learn( + from_data="train.csv", + predict='Survived', + model_name='titanic_model' ) -print('!------------- Learning ran successfully -------------!') - -''': -features = {} -result = mdb.predict(when=features, model_name='run_a_file') -print(result) -''' -print('!------------- Prediction from file ran successfully -------------!')
Error while training model on a Kaggle dataset

**Describe the bug**
Error `"could not convert string to float: 'SC/PARIS 2131'"` while training the model on the Kaggle dataset for the beginner competition, Titanic: Machine Learning from Disaster.

**To Reproduce**
Steps to reproduce the behavior:
1. Download train.csv from [here](https://www.kaggle.com/c/titanic/data).
2. Run
```
from mindsdb import *

MindsDB().learn(
    from_data="train.csv",
    predict='Survived',
    model_name='titanic_model'
)
```
3. See the error.

**Expected behavior**
The best trained model should be returned.

**Screenshots**
![ss](https://user-images.githubusercontent.com/21312552/52881645-2277c300-318b-11e9-8c77-0f8f2686d25f.png)

**Desktop (please complete the following information):**
- OS: Ubuntu 18.04 Kubuntu
- Used in a Jupyter notebook

**Additional context**
The error appears to occur in the StatsGenerator step, on the 'Ticket' column of the dataset.
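The crash comes from a column whose values are mostly numeric-looking but occasionally pure text (`'SC/PARIS 2131'` in the Ticket column), so a blanket float cast dies. A minimal sketch of the defensive inference such a bug calls for (hedged: simplified standalone logic, not MindsDB's actual `StatsGenerator.get_column_data_type`): one unparseable member is enough to treat the whole column as text.

```python
def infer_column_type(values) -> str:
    """Classify a column as 'numeric' only if every non-empty value parses;
    a single text member (e.g. 'SC/PARIS 2131') demotes the column to 'text'."""
    saw_number = False
    for value in values:
        if value in ("", None):
            continue
        try:
            float(str(value).replace(",", "."))
            saw_number = True
        except ValueError:
            # One unparseable member is enough: casting later would crash.
            return "text"
    return "numeric" if saw_number else "text"


assert infer_column_type(["7.25", "71.2833", "8.05"]) == "numeric"
assert infer_column_type(["113803", "SC/PARIS 2131"]) == "text"
```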
Thank you @ayush9398, we will review this dataset.
2019-02-18T00:14:25Z
[]
[]
mindsdb/mindsdb
133
mindsdb__mindsdb-133
[ "125" ]
1d496341fc75e4ec9d9e9511f5dd8074159cdc76
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -178,6 +178,18 @@ def _create_ludwig_dataframe(self, mode): unix_ts = parse_datetime(self.transaction.input_data.data_array[row_ind][col_ind]).timestamp() data[col].append(unix_ts) + elif data_subtype in (DATA_SUBTYPES.FLOAT): + if type(self.transaction.input_data.data_array[row_ind][col_ind]) == str: + data[col].append(float(self.transaction.input_data.data_array[row_ind][col_ind].replace(',','.'))) + else: + data[col].append(self.transaction.input_data.data_array[row_ind][col_ind]) + + elif data_subtype in (DATA_SUBTYPES.INT): + if type(self.transaction.input_data.data_array[row_ind][col_ind]) == str: + data[col].append(round(float(self.transaction.input_data.data_array[row_ind][col_ind].replace(',','.')))) + else: + data[col].append(self.transaction.input_data.data_array[row_ind][col_ind]) + else: data[col].append(self.transaction.input_data.data_array[row_ind][col_ind]) diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -1,6 +1,5 @@ - +import shutil import os - import _thread import uuid import traceback @@ -63,28 +62,28 @@ def __init__(self, name, root_folder=CONFIG.MINDSDB_STORAGE_PATH, log_level=CONF error_message = '''Cannot write into storage path, please either set the config variable mindsdb.config.set('MINDSDB_STORAGE_PATH',<path>) or give write access to {folder}''' raise ValueError(error_message.format(folder=CONFIG.MINDSDB_STORAGE_PATH)) - - - - def export(self, model_zip_file): + def export(self, model_zip_file='mindsdb_storage'): """ If you want to export this mind to a file :param model_zip_file: this is the full_path where you want to store a mind to, it will be a zip file :return: bool (True/False) True if mind was exported successfully """ - pass + try: + shutil.make_archive(model_zip_file, 'zip', CONFIG.MINDSDB_STORAGE_PATH) + return True + except: + return False - def load(self, model_zip_file): + def load(self, model_zip_file='mindsdb_storage.zip'): """ If you want to import a mind from a file :param model_zip_file: this is the full_path that contains your mind :return: bool (True/False) True if mind was importerd successfully """ - pass - + shutil.unpack_archive(model_zip_file, extract_dir=CONFIG.MINDSDB_STORAGE_PATH) def learn(self, to_predict, from_data = None, test_from_data=None, group_by = None, window_size_samples = None, window_size_seconds = None, window_size = None, order_by = [], sample_margin_of_error = CONFIG.DEFAULT_MARGIN_OF_ERROR, ignore_columns = [], rename_strange_columns = False, diff --git a/mindsdb/libs/data_types/transaction_output_data.py b/mindsdb/libs/data_types/transaction_output_data.py --- a/mindsdb/libs/data_types/transaction_output_data.py +++ b/mindsdb/libs/data_types/transaction_output_data.py @@ -10,116 +10,20 @@ def __init__(self): class PredictTransactionOutputData(): - def __init__(self, transaction = None, data = {}, evaluations = {}, ): self.data = data - self.columns = None self.evaluations = evaluations self.transaction = transaction def __iter__(self): - self.columns = self.transaction.persistent_model_metadata.columns - first_col = self.columns[0] - - for i, cell in enumerate(self.data[first_col]): + for i, cell in enumerate(self.data[self.transaction.persistent_model_metadata.columns[0]]): yield TransactionOutputRow(self, i).as_dict() def 
__getitem__(self, item): return TransactionOutputRow(self, item) - def __str__(self): return str(self.data) - @property - def predicted_values(self): - """ - Legacy method, we should remove but for now so that things work - :return: - """ - self.transaction.log.error('predict_values method will be removed from Predictor.predict response, just make sure you use the outout as an iterator') - self.columns = self.transaction.persistent_model_metadata.columns - first_column = self.columns[0] - - ret = [] - for row, v in enumerate(self.data[first_column]): - ret_arr = {col: self.data[col][row] for col in self.evaluations} - ret_arr['prediction_confidence'] = 0 # we no longer support - ret += [ret_arr] - return ret - - -""" - - def _getOrigColum(self, col): - for orig_col in self.columns_map: - if self.columns_map[orig_col] == col: - return orig_col - - def __iter__(self): - self.columns = self.transaction.persistent_model_metadata.columns - first_col = self.columns[0] - - def __iter__(self): - self.iter_col_n = 0 - return self - - def __next__(self): - if self.iter_col_n < len(self.data_array[0]): - predictions_map = {} - for i in range(len(self.predicted_columns)): - pred_col = self.predicted_columns[i] - confidence_col = self.confidence_columns[i] - predictions_map[self._getOrigColum(pred_col)] = self.data_array[self.iter_col_n][self.columns.index(pred_col)] - predictions_map[confidence_col] = self.data_array[self.iter_col_n][self.columns.index(confidence_col)] - - self.iter_col_n += 1 - return predictions_map - else: - raise StopIteration - - def __iter__(self): - self.iter_col_n = 0 - return self - - def __next__(self): - if self.iter_col_n < len(self.data_array[0]): - predictions_map = {} - for i in range(len(self.predicted_columns)): - pred_col = self.predicted_columns[i] - confidence_col = self.confidence_columns[i] - predictions_map[self._getOrigColum(pred_col)] = self.data_array[self.iter_col_n][self.columns.index(pred_col)] - predictions_map[confidence_col] = self.data_array[self.iter_col_n][self.columns.index(confidence_col)] - - self.iter_col_n += 1 - return predictions_map - else: - raise StopIteration - - @property - def predicted_values(self, as_list=False, add_header = False): - - ret = [] - - - for i in range(len(self.predicted_columns)): - pred_col = self.predicted_columns[i] - confidence_col = self.confidence_columns[i] - - if as_list: - ret_row += [row[pred_col_index]] - ret_row += [row[pred_col_confidence_index]] - else: - ret_row[self._getOrigColum(pred_col)] = row[pred_col_index] - ret_row[confidence_col] = row[pred_col_confidence_index] - - - if as_list and add_header: - header = [] - for i in range(len(self.predicted_columns)): - pred_col = self.predicted_columns[i] - confidence_col = self.confidence_columns[i] - header.append(pred_col) - header.append(confidence_col) - ret = [header] + ret -""" + def __len__(self): + return len(self.data[self.transaction.persistent_model_metadata.columns[0]]) diff --git a/mindsdb/libs/data_types/transaction_output_row.py b/mindsdb/libs/data_types/transaction_output_row.py --- a/mindsdb/libs/data_types/transaction_output_row.py +++ b/mindsdb/libs/data_types/transaction_output_row.py @@ -1,30 +1,28 @@ - class TransactionOutputRow: - - def __init__(self, transaction_output, row_key): + def __init__(self, transaction_output, row_index): self.transaction_output = transaction_output - self.row_key = row_key + self.row_index = row_index def __getitem__(self, item): - return self.transaction_output.data[item][self.row_key] + return 
self.transaction_output.data[item][self.row_index] + + def __contains__(self, item): + return item in self.transaction_output.data.keys() def as_dict(self): - return {key:self.transaction_output.data[key][self.row_key] for key in self.transaction_output.data} + return {key:self.transaction_output.data[key][self.row_index] for key in self.transaction_output.data} def explain(self): - for pred_col in self.transaction_output.evaluations: - - self.transaction_output.evaluations[pred_col][self.row_key].explain() + self.transaction_output.evaluations[pred_col][self.row_index].explain() def __str__(self): return str(self.as_dict()) def as_list(self): #Note that here we will not output the confidence columns - - return [self.transaction_output.evaluations[col][self.row_key] for col in self.transaction_output.columns] + return [self.transaction_output.evaluations[col][self.row_index] for col in self.transaction_output.transaction.persistent_model_metadata.columns] @property def _predicted_values(self): - return {pred_col:self.transaction_output.evaluations[pred_col][self.row_key].predicted_value for pred_col in self.transaction_output.evaluations} + return {pred_col:self.transaction_output.evaluations[pred_col][self.row_index].predicted_value for pred_col in self.transaction_output.evaluations}
diff --git a/integration_testing/run_tests.py b/integration_testing/run_tests.py --- a/integration_testing/run_tests.py +++ b/integration_testing/run_tests.py @@ -266,9 +266,11 @@ def test_multilabel_prediction(): try: results = mdb.predict(when_data=test_file_name) - for row in results: + for i in range(len(results)): + row = results[i] expect_columns = [label_headers[0] ,label_headers[0] + '_confidence'] for col in expect_columns: + print(row[col]) if col not in row: logger.error(f'Prediction failed to return expected column: {col}') logger.debug('Got row: {}'.format(row))
Finalize interface before the v.1.0 release

We really need to merge v.1.0 into master sooner rather than later, so we should finalize the interface for `predict` and `learn`: as long as we are happy with the interface, we can fix any arising bugs and add features without breaking backwards compatibility. This is also somewhat relevant for producing the final website docs and video docs.

Personally, I'm happy with the current interface, but if anything comes to mind I'll mention it here.
Add `save_model([path])` <----- saves everything in a zip file
Add `load_model([path])` <----- uses the zip file to load a model
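The patch above implements exactly this pair as `Predictor.export` and `Predictor.load`, thin wrappers around `shutil.make_archive` / `shutil.unpack_archive` over the MindsDB storage path. A usage sketch (hedged: `home_rentals.csv` and the column names are placeholders, and `when_data` follows the form used in the integration test):

```python
from mindsdb import Predictor

# Train, then snapshot the whole storage directory into model_backup.zip.
predictor = Predictor(name='rentals_model')
predictor.learn(to_predict='rental_price', from_data='home_rentals.csv')
predictor.export(model_zip_file='model_backup')  # make_archive appends .zip

# Later, possibly on another machine: restore storage, then predict as usual.
Predictor(name='rentals_model').load(model_zip_file='model_backup.zip')
results = Predictor(name='rentals_model').predict(when_data='new_rentals.csv')
```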
2019-03-31T23:11:26Z
[]
[]
mindsdb/mindsdb
134
mindsdb__mindsdb-134
[ "119" ]
1d496341fc75e4ec9d9e9511f5dd8074159cdc76
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -95,6 +95,9 @@ def _create_ludwig_dataframe(self, mode): elif mode == 'validate': indexes = self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY] columns = self.transaction.persistent_model_metadata.columns + elif mode == 'test': + indexes = self.transaction.input_data.test_indexes[KEY_NO_GROUP_BY] + columns = self.transaction.persistent_model_metadata.columns else: raise Exception(f'Unknown mode specified: "{mode}"') model_definition = {'input_features': [], 'output_features': []} @@ -266,5 +269,5 @@ def predict(self, mode='predict', ignore_columns=[]): for col_name in predictions: col_name_normalized = col_name.replace('_predictions', '') predictions = predictions.rename(columns = {col_name: col_name_normalized}) - + return predictions diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -8,8 +8,9 @@ from mindsdb.libs.data_types.model_data import ModelData from mindsdb.libs.data_types.mindsdb_logger import log from mindsdb.libs.backends.ludwig import LudwigBackend -from mindsdb.libs.ml_models.probabilistic_validator import ProbabilisticValidator +from mindsdb.libs.model_examination.probabilistic_validator import ProbabilisticValidator from mindsdb.config import CONFIG +from mindsdb.libs.helpers.general_helpers import unpickle_obj import time import _thread @@ -124,7 +125,7 @@ def _execute_learn(self): self.persistent_model_metadata.insert() self.persistent_model_metadata.update() - + return except Exception as e: self.persistent_model_metadata.current_phase = MODEL_STATUS_ERROR @@ -156,7 +157,8 @@ def _execute_predict(self): :return: """ - self._call_phase_module('StatsLoader') + self.persistent_model_metadata = self.persistent_model_metadata.find_one(self.persistent_model_metadata.getPkey()) + if self.persistent_model_metadata is None: self.log.error('No metadata found for this model') return @@ -187,7 +189,7 @@ def _execute_predict(self): self.output_data.data[col].append(cell) for predicted_col in self.persistent_model_metadata.predict_columns: - probabilistic_validator = ProbabilisticValidator.unpickle(self.persistent_model_metadata.probabilistic_validators[predicted_col]) + probabilistic_validator = unpickle_obj(self.persistent_model_metadata.probabilistic_validators[predicted_col]) predicted_values = predictions[predicted_col] self.output_data.data[predicted_col] = predicted_values diff --git a/mindsdb/libs/data_entities/persistent_model_metadata.py b/mindsdb/libs/data_entities/persistent_model_metadata.py --- a/mindsdb/libs/data_entities/persistent_model_metadata.py +++ b/mindsdb/libs/data_entities/persistent_model_metadata.py @@ -35,3 +35,5 @@ def setup(self): self.probabilistic_validators = None self.ludwig_data = None + + self.column_importances = None diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -1,15 +1,15 @@ - import platform import re import urllib import uuid from pathlib import Path - +import pickle import requests from mindsdb.__about__ import __version__ from mindsdb.config import CONFIG from mindsdb.libs.data_types.mindsdb_logger import log +from mindsdb.libs.constants.mindsdb import * def get_key_for_val(key, dict_map): @@ 
-124,3 +124,74 @@ def check_for_updates(): except: log.warning('could not check for MindsDB updates') + +def pickle_obj(object_to_pickle): + """ + Returns a version of self that can be serialized into mongodb or tinydb + :return: The data of an object serialized via pickle and decoded as a latin1 string + """ + + return pickle.dumps(object_to_pickle).decode(encoding='latin1') + + +def unpickle_obj(pickle_string): + """ + :param pickle_string: A latin1 encoded python str containing the pickle data + :return: Returns an object generated from the pickle string + """ + return pickle.loads(pickle_string.encode(encoding='latin1')) + + +def closest(arr, value): + """ + :return: The index of the member of `arr` which is closest to `value` + """ + + for i,ele in enumerate(arr): + value = float(value) + if ele > value: + return i - 1 + + return len(arr)-1 + + +def get_value_bucket(value, buckets, col_stats): + """ + :return: The bucket in the `histogram` in which our `value` falls + """ + if col_stats['data_subtype'] in (DATA_SUBTYPES.SINGLE, DATA_SUBTYPES.MULTIPLE): + if value in buckets: + bucket = buckets.index(value) + else: + bucket = -1 #Index for values no in the list + + elif col_stats['data_subtype'] in (DATA_SUBTYPES.BINARY, DATA_SUBTYPES.INT, DATA_SUBTYPES.FLOAT): + bucket = closest(buckets, value) + else: + bucket = None + + return bucket + + +def evaluate_accuracy(predictions, real_values, col_stats, output_columns): + score = 0 + for output_column in output_columns: + cummulative_scores = 0 + if 'percentage_buckets' in col_stats[output_column]: + bucket = col_stats[output_column]['percentage_buckets'] + else: + bucket = None + + for i in range(len(real_values[output_column])): + pred_val_bucket = get_value_bucket(predictions[output_column][i], bucket, col_stats[output_column]) + if pred_val_bucket is None: + if predictions[output_column][i] == real_values[output_column][i]: + cummulative_scores += 1 + elif pred_val_bucket == get_value_bucket(real_values[output_column][i], bucket, col_stats[output_column]): + cummulative_scores += 1 + + score += cummulative_scores/len(predictions[output_column]) + score = score/len(output_columns) + if score == 0: + score = 0.00000001 + return score diff --git a/mindsdb/libs/ml_models/__init__.py b/mindsdb/libs/model_examination/__init__.py similarity index 100% rename from mindsdb/libs/ml_models/__init__.py rename to mindsdb/libs/model_examination/__init__.py diff --git a/mindsdb/libs/model_examination/column_evaluator.py b/mindsdb/libs/model_examination/column_evaluator.py new file mode 100644 --- /dev/null +++ b/mindsdb/libs/model_examination/column_evaluator.py @@ -0,0 +1,53 @@ +from mindsdb.libs.helpers.general_helpers import evaluate_accuracy + + +class ColumnEvaluator(): + """ + # The Hypothesis Executor is responsible for testing out various scenarios + regarding the model, in order to determine things such as the importance of + input variables or the variability of output values + """ + + def __init__(self): + self.columnless_predictions = {} + self.normal_predictions = None + + + def get_column_importance(self, model, output_columns, input_columns, full_dataset, stats): + self.normal_predictions = model.predict('validate') + normal_accuracy = evaluate_accuracy(self.normal_predictions, full_dataset, stats, output_columns) + + column_importance_dict = {} + + for input_column in input_columns: + # See what happens with the accuracy of the outputs if only this column is present + ignore_columns = [col for col in input_columns if col != 
input_column ] + col_only_predictions = model.predict('validate', ignore_columns) + col_only_accuracy = evaluate_accuracy(self.normal_predictions, full_dataset, stats, output_columns) + col_only_normalized_accuracy = col_only_accuracy/normal_accuracy + + # See what happens with the accuracy if all columns but this one are present + ignore_columns = [input_column] + col_missing_predictions = model.predict('validate', ignore_columns) + + self.columnless_predictions[input_column] = col_missing_predictions + + col_missing_accuracy = evaluate_accuracy(self.normal_predictions, full_dataset, stats, output_columns) + col_missing_reverse_accuracy = (normal_accuracy - col_missing_accuracy)/normal_accuracy + + column_importance = (col_only_normalized_accuracy + col_missing_reverse_accuracy)/2 + column_importance_dict[input_column] = column_importance + return column_importance_dict + + def get_column_influence(self): + pass + + + + + + + + + +# diff --git a/mindsdb/libs/ml_models/probabilistic_validator.py b/mindsdb/libs/model_examination/probabilistic_validator.py similarity index 73% rename from mindsdb/libs/ml_models/probabilistic_validator.py rename to mindsdb/libs/model_examination/probabilistic_validator.py --- a/mindsdb/libs/ml_models/probabilistic_validator.py +++ b/mindsdb/libs/model_examination/probabilistic_validator.py @@ -1,17 +1,14 @@ -from sklearn.naive_bayes import GaussianNB, ComplementNB, MultinomialNB - - from mindsdb.libs.constants.mindsdb import * from mindsdb.libs.data_types.probability_evaluation import ProbabilityEvaluation -import numpy as np -import pickle +from mindsdb.libs.helpers.general_helpers import get_value_bucket +from sklearn.naive_bayes import GaussianNB, ComplementNB, MultinomialNB +import numpy as np class ProbabilisticValidator(): """ - # The probabilistic validator is a quick to train model used for validating the predictions - of our main model + # The probabilistic validator is a quick to train model used for validating the predictions of our main model # It is fit to the results our model gets on the validation set """ _smoothing_factor = 0.5 # TODO: Autodetermine smotthing factor depending on the info we know about the dataset @@ -21,7 +18,7 @@ class ProbabilisticValidator(): Y_buff = None - def __init__(self, buckets, data_type=None): + def __init__(self, col_stats, data_type=None): """ Chose the algorithm to use for the rest of the model As of right now we go with ComplementNB @@ -34,54 +31,15 @@ def __init__(self, buckets, data_type=None): self.X_buff = [] self.Y_buff = [] - self.buckets = buckets - if self.buckets is not None: - self.bucket_keys = [i for i in range(len(self.buckets))] - - self.data_type = data_type - - def pickle(self): - """ - Returns a version of self that can be serialized into mongodb or tinydb - - :return: The data of a ProbabilisticValidator serialized via pickle and decoded as a latin1 string - """ - - return pickle.dumps(self).decode(encoding='latin1') - - @staticmethod - def unpickle(pickle_string): - """ - :param pickle_string: A latin1 encoded python str containing the pickle data - :return: Returns a ProbabilisticValidator object generated from the pickle string - """ - return pickle.loads(pickle_string.encode(encoding='latin1')) - - @staticmethod - def _closest(arr, value): - """ - :return: The index of the member of `arr` which is closest to `value` - """ - - for i,ele in enumerate(arr): - if ele > value: - return i - 1 + self.col_stats = col_stats - return len(arr)-1 - - # For contignous values we want to use a bucket in 
the histogram to get a discrete label - def _get_value_bucket(self, value): - """ - :return: The bucket in the `histogram` in which our `value` falls - """ - if type(value) == type(''): - if value in self.buckets: - i = self.buckets.index(value) - else: - i = 1 # todo make sure that there is an index for values not in list + if 'percentage_buckets' in col_stats: + self.buckets = col_stats['percentage_buckets'] + self.bucket_keys = [i for i in range(len(self.buckets))] else: - i = self._closest(self.buckets, value) - return i + self.buckets = None + + self.data_type = col_stats['data_type'] def register_observation(self, features_existence, real_value, predicted_value): @@ -97,8 +55,8 @@ def register_observation(self, features_existence, real_value, predicted_value): real_value = real_value if self.data_type != DATA_TYPES.NUMERIC else float(real_value) if self.buckets is not None: - predicted_value_b = self._get_value_bucket(predicted_value) - real_value_b = self._get_value_bucket(real_value) + predicted_value_b = get_value_bucket(predicted_value, self.buckets, self.col_stats) + real_value_b = get_value_bucket(real_value, self.buckets, self.col_stats) X = [False] * len(self.buckets) X[predicted_value_b] = True X = X + features_existence @@ -141,16 +99,14 @@ def fit(self): def evaluate_prediction_accuracy(self, features_existence, predicted_value): """ - # Fit the probabilistic validator on an observation - + # Fit the probabilistic validator on an observation def evaluate_prediction_accuracy(self, features_existence, predicted_value): :param features_existence: A vector of 0 and 1 representing the existence of all the features (0 == not exists, 1 == exists) :param predicted_value: The predicted value/label - :param histogram: The histogram for the predicted column, which allows us to bucketize the `predicted_value` :return: The probability (from 0 to 1) of our prediction being accurate (within the same histogram bucket as the real value) """ if self.buckets is not None: - predicted_value_b = self._get_value_bucket(predicted_value) + predicted_value_b = get_value_bucket(predicted_value, self.buckets, self.col_stats) X = [False] * len(self.buckets) X[predicted_value_b] = True X = [X + features_existence] @@ -171,11 +127,8 @@ def evaluate_prediction_accuracy(self, features_existence, predicted_value): if __name__ == "__main__": - import random - - values = [2,2,2,3,5,2,2,2,3,5] predictions = [2,2,2,3,2,2,2,2,3,2] diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -1,8 +1,9 @@ -from mindsdb.libs.helpers.general_helpers import convert_snake_to_cammelcase_string +from mindsdb.libs.helpers.general_helpers import pickle_obj, unpickle_obj from mindsdb.libs.constants.mindsdb import * from mindsdb.libs.phases.base_module import BaseModule from mindsdb.libs.data_types.sampler import Sampler -from mindsdb.libs.ml_models.probabilistic_validator import ProbabilisticValidator +from mindsdb.libs.model_examination.probabilistic_validator import ProbabilisticValidator +from mindsdb.libs.model_examination.column_evaluator import ColumnEvaluator import pandas as pd import numpy as np @@ -16,61 +17,59 @@ def run(self): # Runs the model on the validation set in order to fit a probabilistic model that will evaluate the accuracy of future predictions """ - predict_column_names = 
self.transaction.train_metadata.model_predict_columns - non_predict_columns = [col for col in self.transaction.persistent_model_metadata.columns if col not in predict_column_names] + output_columns = self.transaction.persistent_model_metadata.predict_columns + input_columns = [col for col in self.transaction.persistent_model_metadata.columns if col not in output_columns] + validation_dataset = {} - probabilistic_validators = {} + for row_ind in self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY]: + for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns): + if col not in validation_dataset: + validation_dataset[col] = [] + validation_dataset[col].append(self.transaction.input_data.data_array[row_ind][col_ind]) + + # Test some hypotheses about our columns + column_evaluator = ColumnEvaluator() + column_importances = column_evaluator.get_column_importance(model=self.transaction.model_backend, output_columns=output_columns, input_columns=input_columns, + full_dataset=validation_dataset, stats=self.transaction.persistent_model_metadata.column_stats) + + print(column_importances) + self.transaction.persistent_model_metadata.column_importances = column_importances - for col in predict_column_names: + # Create the probabilistic validators for each of the predict column + probabilistic_validators = {} + for col in output_columns: if 'percentage_buckets' in self.transaction.persistent_model_metadata.column_stats[col]: probabilistic_validators[col] = ProbabilisticValidator( - buckets=self.transaction.persistent_model_metadata.column_stats[col]['percentage_buckets'], data_type=self.transaction.persistent_model_metadata.column_stats[col]['data_type']) + col_stats=self.transaction.persistent_model_metadata.column_stats[col]) else: probabilistic_validators[col] = ProbabilisticValidator( - buckets=None, data_type=self.transaction.persistent_model_metadata.column_stats[col]['data_type']) - - # create a list of columns to ignore starting with none, and then one experiment per column - ignore_none = [[]] - ignore_just_one = [[col] for col in non_predict_columns] - ignore_all_but_one = [[coli for coli in non_predict_columns if coli!=col] for col in non_predict_columns] - ignore_column_options = ignore_none + ignore_just_one + ignore_all_but_one - - ### Create the real values for the created columns (maybe move to a new 'validate' method of the mode backend ?) 
- validation_data = {} - - indexes = self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY] - for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns): - validation_data[col] = [] - for row_ind in indexes: - validation_data[col].append(self.transaction.input_data.data_array[row_ind][col_ind]) + col_stats=self.transaction.persistent_model_metadata.column_stats[col]) # Run on the validation set multiple times, each time with one of the column blanked out - for column_name in self.transaction.persistent_model_metadata.predict_columns: + for column_name in input_columns: ignore_columns = [] - if column_name not in self.transaction.persistent_model_metadata.predict_columns: - ignore_columns.append(column_name) + ignore_columns.append(column_name) - predictions = self.transaction.model_backend.predict('validate',ignore_columns) + predictions = self.transaction.model_backend.predict('validate', ignore_columns) # create a vector that has True for each feature that was passed to the model tester and False if it was blanked - features_existence = [True if np_col not in ignore_columns else False for np_col in non_predict_columns] + features_existence = [True if np_col not in ignore_columns else False for np_col in input_columns] # A separate probabilistic model is trained for each predicted column, we may want to change this in the future, @TODO - for pcol in predict_column_names: + for pcol in output_columns: for i in range(len(predictions[pcol])): - predicted_val = predictions[pcol][i] - real_val = predictions[pcol][i] + real_val = validation_dataset[pcol][i] probabilistic_validators[pcol].register_observation(features_existence=features_existence, real_value=real_val, predicted_value=predicted_val) - for pcol in predict_column_names: + for pcol in output_columns: probabilistic_validators[pcol].partial_fit() # Pickle for later use self.transaction.persistent_model_metadata.probabilistic_validators = {} for col in probabilistic_validators: - self.transaction.persistent_model_metadata.probabilistic_validators[col] = probabilistic_validators[col].pickle() + self.transaction.persistent_model_metadata.probabilistic_validators[col] = pickle_obj(probabilistic_validators[col]) self.transaction.persistent_model_metadata.update() @@ -78,9 +77,6 @@ def test(): from mindsdb.libs.controllers.predictor import Predictor from mindsdb import CONFIG - #CONFIG.DEBUG_BREAK_POINT = PHASE_MODEL_ANALYZER - - #mdb = Predictor(name='home_rentals') mdb = Predictor(name='home_rentals') mdb.learn( diff --git a/mindsdb/libs/phases/stats_loader/__init__.py b/mindsdb/libs/phases/stats_loader/__init__.py deleted file mode 100644 diff --git a/mindsdb/libs/phases/stats_loader/stats_loader.py b/mindsdb/libs/phases/stats_loader/stats_loader.py deleted file mode 100644 --- a/mindsdb/libs/phases/stats_loader/stats_loader.py +++ /dev/null @@ -1,32 +0,0 @@ -from mindsdb.libs.constants.mindsdb import * -from mindsdb.libs.phases.base_module import BaseModule - - -class StatsLoader(BaseModule): - - phase_name = PHASE_STATS_GENERATOR - - def run(self): - - self.transaction.persistent_model_metadata = self.transaction.persistent_model_metadata.find_one(self.transaction.persistent_model_metadata.getPkey()) - - # laod the most accurate model - - info = self.transaction.persistent_ml_model_info.find({'model_name':self.transaction.metadata.model_name}, order_by=[('r_squared',-1)]) - - if info is not None and len(info)>0: - self.transaction.persistent_ml_model_info = info[0] - else: - self.log.error('No model 
found for this statement, please check if model_name {model_name} was trained'.format(model_name=self.transaction.metadata.model_name)) - -def test(): - - from mindsdb.libs.test.test_controller import TestController - - module = TestController('CREATE MODEL FROM (SELECT * FROM Uploads.views.tweets2 LIMIT 100) AS tweets4 PREDICT likes', PHASE_DATA_EXTRACTOR) - - return - -# only run the test if this file is called from debugger -if __name__ == "__main__": - test()
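A note on the `pickle_obj`/`unpickle_obj` helpers introduced in the patch above: decoding the pickle byte stream as latin1 maps every byte value to a character, so the result is a plain `str` that document stores such as TinyDB or MongoDB can persist, and re-encoding as latin1 recovers the exact bytes. A minimal standalone sketch of that round trip (not the MindsDB module itself):

```python
import pickle

def pickle_obj(obj):
    # Decode the pickle bytes as latin1 so every byte maps to one character
    # and the result is a plain str that a JSON/document store can hold.
    return pickle.dumps(obj).decode(encoding='latin1')

def unpickle_obj(pickle_string):
    # Re-encode as latin1 to recover the exact byte stream, then unpickle.
    return pickle.loads(pickle_string.encode(encoding='latin1'))

original = {'column_stats': [1.5, 2.5], 'buckets': None}
assert unpickle_obj(pickle_obj(original)) == original
```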
diff --git a/integration_testing/run_tests.py b/integration_testing/run_tests.py --- a/integration_testing/run_tests.py +++ b/integration_testing/run_tests.py @@ -204,6 +204,82 @@ def test_one_label_prediction(): logger.info('One-label prediction test ran succesfully !') +def test_one_label_prediction_wo_strings(): + logger.info('Starting one-label test') + separator = ',' + train_file_name = 'train_data.csv' + test_file_name = 'test_data.csv' + data_len = 8000 + + # Create the full dataset + logger.debug(f'Creating one-labe test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows') + + try: + features = generate_value_cols(['int','float','date','int'],data_len, separator) + labels = [generate_labels_2(features, separator)] + + feature_headers = list(map(lambda col: col[0], features)) + label_headers = list(map(lambda col: col[0], labels)) + + # Create the training dataset and save it to a file + columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features)) + columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels))) + columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers]) + + # Create the testing dataset and save it to a file + columns_test = list(map(lambda col: col[int(len(col)*3/4):], features)) + columns_to_file(columns_test, test_file_name, separator, headers=feature_headers) + logger.debug(f'Datasets generate and saved to files successfully') + except: + print(traceback.format_exc()) + logger.error(f'Failed to generate datasets !') + exit(1) + + # Train + mdb = None + try: + mdb = mindsdb.Predictor(name='test_one_label_prediction_wo_strings') + logger.debug(f'Succesfully create mindsdb Predictor') + except: + logger.error(f'Failed to create mindsdb Predictor') + exit(1) + + + try: + mdb.learn(from_data=train_file_name, to_predict=label_headers) + logger.info(f'--------------- Learning ran succesfully ---------------') + except: + print(traceback.format_exc()) + logger.error(f'Failed during the training !') + exit(1) + + # Predict + try: + mdb = mindsdb.Predictor(name='test_one_label_prediction_wo_strings') + logger.debug(f'Succesfully create mindsdb Predictor') + except: + print(traceback.format_exc()) + logger.error(f'Failed to create mindsdb Predictor') + exit(1) + + try: + results = mdb.predict(when_data=test_file_name) + for row in results: + expect_columns = [label_headers[0] ,label_headers[0] + '_confidence'] + for col in expect_columns: + if col not in row: + logger.error(f'Prediction failed to return expected column: {col}') + logger.debug('Got row: {}'.format(row)) + exit(1) + + logger.info(f'--------------- Predicting ran succesfully ---------------') + except: + print(traceback.format_exc()) + logger.error(f'Failed whilst predicting') + exit(1) + + logger.info('One-label prediction test ran succesfully !') + def test_multilabel_prediction(): logger.info('Starting multilabel prediction test') separator = ',' @@ -288,6 +364,7 @@ def test_multilabel_prediction(): setup_testing_logger() +test_one_label_prediction_wo_strings() +test_timeseries() test_multilabel_prediction() -#test_timeseries() -#test_one_label_prediction() +test_one_label_prediction()
Determine column importance (black box approach) Within the model analyzer, we should try to determine the importance of each individual column. In order to do this we will: a) Run the model on the validation dataset feeding only one column at a time; from this we obtain a score we'll call `COS` (column only score). This score is the accuracy of the model, going from 0 to 1 (1 being perfect accuracy). b) Run the model on the validation dataset feeding all but one column at a time; from this we obtain a score we'll call `CMS` (column missing score). This score is the accuracy of the model, going from 0 to 1 (1 being perfect accuracy). Each individual column will have both of these scores attached to it. From these, we'll compute the final `CIS` (column importance score) as: `CIS = (1 - CMS + COS)/2` (a sketch of this computation follows below). Based on the `CIS` score for each column we can report various things to the user: * Most important column * Column most likely to contribute to poor predictions (high `CIS` + low quality detected in the stats generator) * Redundant columns (very low `CIS` + bad correlation scores in the stats generator) * Columns to consider removing (low `CIS` + low quality detected in the stats generator)
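A rough sketch of the scoring loop described above. Names here are illustrative assumptions rather than the final MindsDB API: `model.predict(mode, ignore_columns)` and `evaluate_accuracy` stand in for what the model analyzer exposes, and both scores are normalized against the all-columns baseline, as the patch's `ColumnEvaluator` does:

```python
def column_importance_scores(model, input_columns, output_columns,
                             validation_data, stats, evaluate_accuracy):
    # Baseline accuracy with every input column present.
    normal = evaluate_accuracy(model.predict('validate'),
                               validation_data, stats, output_columns)
    scores = {}
    for col in input_columns:
        # COS: accuracy when only `col` is fed to the model.
        others = [c for c in input_columns if c != col]
        cos = evaluate_accuracy(model.predict('validate', others),
                                validation_data, stats, output_columns) / normal
        # CMS: accuracy when every input column except `col` is fed.
        cms = evaluate_accuracy(model.predict('validate', [col]),
                                validation_data, stats, output_columns) / normal
        scores[col] = (1 - cms + cos) / 2  # CIS
    return scores
```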
2019-03-31T23:18:00Z
[]
[]
mindsdb/mindsdb
141
mindsdb__mindsdb-141
[ "25", "43" ]
bb8d5845157f8e9b83821e67f2b985981deb1f76
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -88,16 +88,16 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co def _create_ludwig_dataframe(self, mode): if mode == 'train': indexes = self.transaction.input_data.train_indexes[KEY_NO_GROUP_BY] - columns = self.transaction.persistent_model_metadata.columns + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns)] elif mode == 'predict': indexes = self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY] - columns = [col for col in self.transaction.persistent_model_metadata.columns if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns) if col not in self.transaction.persistent_model_metadata.predict_columns] elif mode == 'validate': indexes = self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY] - columns = [col for col in self.transaction.persistent_model_metadata.columns if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns) if col not in self.transaction.persistent_model_metadata.predict_columns] elif mode == 'test': indexes = self.transaction.input_data.test_indexes[KEY_NO_GROUP_BY] - columns = [col for col in self.transaction.persistent_model_metadata.columns if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns) if col not in self.transaction.persistent_model_metadata.predict_columns] else: raise Exception(f'Unknown mode specified: "{mode}"') model_definition = {'input_features': [], 'output_features': []} @@ -108,7 +108,9 @@ def _create_ludwig_dataframe(self, mode): else: timeseries_cols = list(map(lambda x: x[0], self.transaction.persistent_model_metadata.model_order_by)) - for col_ind, col in enumerate(columns): + for ele in columns: + col = ele[0] + col_ind = ele[1] data[col] = [] col_stats = self.transaction.persistent_model_metadata.column_stats[col] @@ -299,6 +301,7 @@ def train(self): def predict(self, mode='predict', ignore_columns=[]): predict_dataframe, model_definition = self._create_ludwig_dataframe(mode) + model = LudwigModel.load(self.transaction.persistent_model_metadata.ludwig_data['ludwig_save_path']) if self.transaction.persistent_model_metadata.model_order_by is None: @@ -310,7 +313,11 @@ def predict(self, mode='predict', ignore_columns=[]): predict_dataframe, model_definition = self._translate_df_to_timeseries_format(predict_dataframe, model_definition, timeseries_cols) for ignore_col in ignore_columns: - predict_dataframe[ignore_col] = [None] * len(predict_dataframe[ignore_col]) + try: + predict_dataframe[ignore_col] = [None] * len(predict_dataframe[ignore_col]) + except: + for date_appendage in ['_year', '_month','_day']: + predict_dataframe[ignore_col + date_appendage] = [None] * len(predict_dataframe[ignore_col + date_appendage]) predictions = model.predict(data_df=predict_dataframe) for col_name in predictions: diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -148,7 +148,9 @@ def 
closest(arr, value): """ for i,ele in enumerate(arr): - value = float(value) + if value == None: + return -1 + value = float(value.replace(',','.')) if ele > value: return i - 1 @@ -163,12 +165,12 @@ def get_value_bucket(value, buckets, col_stats): if value in buckets: bucket = buckets.index(value) else: - bucket = -1 #Index for values no in the list + bucket = len(buckets) # for null values elif col_stats['data_subtype'] in (DATA_SUBTYPES.BINARY, DATA_SUBTYPES.INT, DATA_SUBTYPES.FLOAT): bucket = closest(buckets, value) else: - bucket = None + bucket = len(buckets) # for null values return bucket diff --git a/mindsdb/libs/helpers/text_helpers.py b/mindsdb/libs/helpers/text_helpers.py --- a/mindsdb/libs/helpers/text_helpers.py +++ b/mindsdb/libs/helpers/text_helpers.py @@ -41,7 +41,7 @@ def cast_string_to_python_type(string): if string is None: return None return int(string) - except ValueError: + except: try: return clean_float(string) except ValueError: diff --git a/mindsdb/libs/model_examination/probabilistic_validator.py b/mindsdb/libs/model_examination/probabilistic_validator.py --- a/mindsdb/libs/model_examination/probabilistic_validator.py +++ b/mindsdb/libs/model_examination/probabilistic_validator.py @@ -52,12 +52,15 @@ def register_observation(self, features_existence, real_value, predicted_value): :param histogram: The histogram for the predicted column, which allows us to bucketize the `predicted_value` and `real_value` """ predicted_value = predicted_value if self.data_type != DATA_TYPES.NUMERIC else float(predicted_value) - real_value = real_value if self.data_type != DATA_TYPES.NUMERIC else float(real_value) + try: + real_value = real_value if self.data_type != DATA_TYPES.NUMERIC else float(real_value.replace(',','.')) + except: + real_value = None if self.buckets is not None: predicted_value_b = get_value_bucket(predicted_value, self.buckets, self.col_stats) real_value_b = get_value_bucket(real_value, self.buckets, self.col_stats) - X = [False] * len(self.buckets) + X = [False] * (len(self.buckets) + 1) X[predicted_value_b] = True X = X + features_existence self.X_buff.append(X) @@ -107,7 +110,7 @@ def evaluate_prediction_accuracy(self, features_existence, predicted_value): if self.buckets is not None: predicted_value_b = get_value_bucket(predicted_value, self.buckets, self.col_stats) - X = [False] * len(self.buckets) + X = [False] * (len(self.buckets) + 1) X[predicted_value_b] = True X = [X + features_existence] else: diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py --- a/mindsdb/libs/phases/data_extractor/data_extractor.py +++ b/mindsdb/libs/phases/data_extractor/data_extractor.py @@ -11,6 +11,7 @@ import random import traceback import pandas +import numpy as np class DataExtractor(BaseModule): @@ -102,7 +103,11 @@ def _get_prepared_input_df(self, train_metadata): return None df = self._apply_sort_conditions_to_df(df, train_metadata) + g = df.columns.to_series().groupby(df.dtypes).groups + if np.dtype('<M8[ns]') in g: + for colname in g[np.dtype('<M8[ns]')]: + df[colname] = df[colname].astype(str) return df @@ -225,7 +230,7 @@ def run(self): test_window = (validation_window[1],length) self.transaction.input_data.test_indexes[key] = self.transaction.input_data.all_indexes[key][test_window[0]:test_window[1]] self.transaction.input_data.validation_indexes[key] = self.transaction.input_data.all_indexes[key][validation_window[0]:validation_window[1]] - + # log some stats if self.transaction.metadata.type == 
TRANSACTION_LEARN: diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -76,7 +76,7 @@ def _get_date_type(self, string): return DATA_SUBTYPES.DATE else: return DATA_SUBTYPES.TIMESTAMP - except ValueError: + except: return False def _get_text_type(self, data): @@ -135,7 +135,7 @@ def _get_column_data_type(self, data): # calculate type_dist for element in data: # Maybe use list of functions in the future - + element = element current_subtype_guess = 'Unknown' current_type_guess = 'Unknown' @@ -748,10 +748,8 @@ def run(self): kurtosis = 0 xp = [] - is_float = True if max([1 if int(i) != i else 0 for i in col_data]) == 1 else False - col_stats = { 'data_type': data_type, 'data_subtype': curr_data_subtype,
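One piece of the patch above normalizes pandas datetime columns back to strings before the rest of the pipeline sees them, which addresses the date-parsing failures reported below. A small standalone illustration of that dtype check (the sample column names are made up):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'date': pd.to_datetime(['2019-01-01', '2019-01-02']),
                   'value': [1.0, 2.0]})

# Group column names by dtype, then cast any datetime64[ns] column to str,
# mirroring the data_extractor change in the patch above.
g = df.columns.to_series().groupby(df.dtypes).groups
if np.dtype('<M8[ns]') in g:
    for colname in g[np.dtype('<M8[ns]')]:
        df[colname] = df[colname].astype(str)

print(df.dtypes)  # 'date' is now object (str) rather than datetime64[ns]
```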
diff --git a/integration_testing/run_a_file.py b/integration_testing/run_a_file.py --- a/integration_testing/run_a_file.py +++ b/integration_testing/run_a_file.py @@ -1,16 +1,20 @@ from mindsdb import Predictor -mdb = Predictor(name='titanic_model') +mdb = Predictor(name='marvel') -#mdb.learn(from_data="integration_testing/train.csv", to_predict=['Survived','Pclass']) +mdb.learn(from_data="marvel-wikia.xlsx", to_predict='FIRST_APPEARANCE') print('------------------------------------------------------------Done training------------------------------------------------------------') - -#mdb = Predictor(name='titanic_model') -predicted = mdb.predict(when_data="integration_testing/train.csv") +""" +predicted = mdb.predict(when={ + 'Date':'11/03/2020', + 'Time':'18.00.00', + 'NMHC_GT': 1360.0, + 'AH': 0.655 +}) print('------------------------------------------------------------Preidiction output------------------------------------------------------------') for val in predicted: - print(val) -exit() -print(predicted.predicted_values) + print(val['CO_GT']) + print(val['CO_GT_confidence']) +"""
Getting None as prediction value and prediction confidence **Describe the bug** I trained some data and got around 73% accuracy, so I started predicting by passing different independent variables. I then got both the predicted value and the confidence as None, especially when I include a date in the "when" clause. **Expected behavior** It should give a reasonable predicted value and a prediction confidence (some number for the confidence). **Screenshots** Please find the screenshot ![pollutant_none](https://user-images.githubusercontent.com/27191684/49336859-d7d8b700-f62f-11e8-9ce6-35efb730ef4d.JPG) **Desktop (please complete the following information):** - OS: macOS "Could not convert string to date" on generating train model **Describe the bug** I took a CSV file containing a Date column. While generating a model on the date column, I get a "could not convert string to date" error. **To Reproduce** Steps to reproduce the behavior: attached CSV in pdf format [marvel-wikia.pdf](https://github.com/mindsdb/mindsdb/files/2698791/marvel-wikia.pdf) train.py: from mindsdb import * MindsDB().learn( from_file="D://mindsdb/basic/marvel-wikia/marvel-wikia.csv", # the path to the file where we can learn from predict='FIRST_APPEARANCE', # the column we want to learn to predict given all the data in the file model_name='marvel_model' # the name of this model ) **Expected behavior** The model should be generated successfully. **Desktop (please complete the following information):** - OS: Windows 10
Thank you Surendra, can you share your training set and the train script? @surendra1472 thank you for reporting this; you can contact me directly at jorge @ mindsdb -dot- com, very interested in seeing the dataset and how I can help with it. Hello, please find the attachment [AirQualityUCI.zip](https://github.com/mindsdb/main/files/2660238/AirQualityUCI.zip) You can find the when condition in the screenshot; please use the data below and ignore the previous one [AirQualityUCI 2.zip](https://github.com/mindsdb/main/files/2660239/AirQualityUCI.2.zip) @surendra1472 Did you train a model from this data successfully? I tried to train a model from this dataset but there are some errors with parsing the Date and Time. With AirQualityUCI 2.zip, I am able to train. May I know which OS you are using? Yes, I am using the AirQualityUCI 2 data. I'm on Ubuntu (16.04), but I don't think the problem is caused by the OS. @geetab19, marvel-wikia.pdf does not have a 'Date' column, can you check once? > @geetab19 , marvel-wikia.pdf does not have 'Date' column, can you check once. Yes it does. It's the -2 column; the pdf shows it from page 95. The pdf you provided seems to contain 2 CSV schemas with different columns. Could you provide the actual `.csv` file you are using for training? GitHub doesn't support .csv, so uploading xlsx [marvel-wikia.xlsx](https://github.com/mindsdb/mindsdb/files/2840530/marvel-wikia.xlsx) Converted to csv and the data looks fine. At the moment I'm unsure why training is failing, since there are other confounding issues on the dev branch, but I'll try to come back to you in a few days and include a fix for whatever is happening here in the next release.
2019-04-02T21:29:28Z
[]
[]
mindsdb/mindsdb
144
mindsdb__mindsdb-144
[ "120" ]
20d553cd3312d32ac6666409d6fe3f0fdd8e7607
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -12,26 +12,50 @@ class LudwigBackend(): def __init__(self, transaction): self.transaction = transaction - def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_cols): - input_features = model_definition['input_features'] + def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_cols, mode='predict'): + timeseries_col_name = timeseries_cols[0] + + previous_predict_col_names = [] + predict_col_names = [] + for feature_def in model_definition['output_features']: + if mode == 'train': + predict_col_names.append(feature_def['name']) + else: + predict_col_names = [] + previous_predict_col_name = 'previous_' + feature_def['name'] + + previous_predict_col_already_in = False + + for definition in model_definition['input_features']: + if definition['name'] == previous_predict_col_name: + previous_predict_col_already_in = True + + if not previous_predict_col_already_in: + model_definition['input_features'].append({ + 'name': previous_predict_col_name + ,'type': 'sequence' + }) other_col_names = [] - timeseries_col_name = timeseries_cols[0] - for feature_def in input_features: - if feature_def['name'] not in self.transaction.persistent_model_metadata.model_group_by: + for feature_def in model_definition['input_features']: + if feature_def['name'] not in self.transaction.persistent_model_metadata.model_group_by and feature_def['name'] not in previous_predict_col_names: feature_def['type'] = 'sequence' if feature_def['name'] not in timeseries_cols: other_col_names.append(feature_def['name']) + + previous_predict_col_names.append(previous_predict_col_name) + new_cols = {} - for col in [*other_col_names,timeseries_col_name]: + for col in [*other_col_names,*previous_predict_col_names,timeseries_col_name,*predict_col_names,*self.transaction.persistent_model_metadata.model_group_by]: new_cols[col] = [] nr_ele = len(df[timeseries_col_name]) if self.transaction.persistent_model_metadata.window_size_seconds is not None: window_size_seconds = self.transaction.persistent_model_metadata.window_size_seconds - for i in range(nr_ele): + i = 0 + while i < nr_ele: current_window = 0 new_row = {} @@ -39,50 +63,94 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co for col in other_col_names: new_row[col] = [df[col][i]] + for col in previous_predict_col_names: + new_row[col] = [] + for col in predict_col_names: + new_row[col] = df[col][i] + for col in self.transaction.persistent_model_metadata.model_group_by: + new_row[col] = df[col][i] inverted_index_range = list(range(i)) inverted_index_range.reverse() + ii = 0 for ii in inverted_index_range: if window_size_seconds < current_window + (timeseries_row[-1] - df[timeseries_col_name][ii]): break current_window += (timeseries_row[-1] - df[timeseries_col_name][ii]) timeseries_row.append(df[timeseries_col_name][ii]) + for col in other_col_names: new_row[col].append(df[col][ii]) + for col in previous_predict_col_names: + try: + new_row[col].append(df[col.replace('previous_', '')][ii]) + except: + try: + new_row[col].append(df[col][ii]) + except: + self.transaction.log.warning('Missing previous predicted values for output column: {}, these should be included in your input under the name: {}'.format(col.replace('previous_', ''), col)) + + if mode == 'train': + i = max(i + 1, (i + round((i - ii)/2))) + else: + i = i + 1 new_row[timeseries_col_name] 
= timeseries_row for col in new_row: - new_row[col].reverse() + if col not in predict_col_names and col not in self.transaction.persistent_model_metadata.model_group_by: + new_row[col].reverse() new_cols[col].append(new_row[col]) else: window_size_samples = self.transaction.persistent_model_metadata.window_size_samples - for i in range(nr_ele): + i = 0 + while i < nr_ele: new_row = {} timeseries_row = [df[timeseries_col_name][i]] for col in other_col_names: new_row[col] = [df[col][i]] + for col in previous_predict_col_names: + new_row[col] = [] + for col in predict_col_names: + new_row[col] = df[col][i] + for col in self.transaction.persistent_model_metadata.model_group_by: + new_row[col] = df[col][i] inverted_index_range = list(range(i)) inverted_index_range.reverse() + ii = 0 for ii in inverted_index_range: if (i - ii) > window_size_samples: break timeseries_row.append(df[timeseries_col_name][ii]) + for col in other_col_names: new_row[col].append(df[col][ii]) + for col in previous_predict_col_names: + try: + new_row[col].append(df[col.replace('previous_', '')][ii]) + except: + try: + new_row[col].append(df[col][ii]) + except: + self.transaction.log.warning('Missing previous predicted values for output column: {}, these should be included in your input under the name: {}'.format(col.replace('previous_', ''), col)) + + if mode == 'train': + i = max(i + 1, (i + round((i - ii)/2))) + else: + i = i + 1 new_row[timeseries_col_name] = timeseries_row for col in new_row: - new_row[col].reverse() + if col not in predict_col_names and col not in self.transaction.persistent_model_metadata.model_group_by: + new_row[col].reverse() new_cols[col].append(new_row[col]) - - for col in new_cols: - df[col] = new_cols[col] + new_df = pd.DataFrame(data=new_cols) + df = new_df return df, model_definition def _create_ludwig_dataframe(self, mode): @@ -94,10 +162,10 @@ def _create_ludwig_dataframe(self, mode): columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns) if col not in self.transaction.persistent_model_metadata.predict_columns] elif mode == 'validate': indexes = self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns) if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns)] #if col not in self.transaction.persistent_model_metadata.predict_columns] elif mode == 'test': indexes = self.transaction.input_data.test_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns) if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns)] #if col not in self.transaction.persistent_model_metadata.predict_columns] else: raise Exception(f'Unknown mode specified: "{mode}"') model_definition = {'input_features': [], 'output_features': []} @@ -283,7 +351,7 @@ def train(self): timeseries_cols = list(map(lambda x: x[0], self.transaction.persistent_model_metadata.model_order_by)) if len(timeseries_cols) > 0: - training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols) + training_dataframe, model_definition = 
self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') model = LudwigModel(model_definition) @@ -301,6 +369,7 @@ def train(self): def predict(self, mode='predict', ignore_columns=[]): predict_dataframe, model_definition = self._create_ludwig_dataframe(mode) + model_definition = self.transaction.persistent_model_metadata.ludwig_data['model_definition'] model = LudwigModel.load(self.transaction.persistent_model_metadata.ludwig_data['ludwig_save_path']) @@ -320,6 +389,7 @@ def predict(self, mode='predict', ignore_columns=[]): predict_dataframe[ignore_col + date_appendage] = [None] * len(predict_dataframe[ignore_col + date_appendage]) predictions = model.predict(data_df=predict_dataframe) + for col_name in predictions: col_name_normalized = col_name.replace('_predictions', '') predictions = predictions.rename(columns = {col_name: col_name_normalized}) diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -100,7 +100,7 @@ def check_for_updates(): if file_path.is_file(): token = open(mdb_file).read() else: - token = '{system}|{version}|{uid}'.format(system=platform.system(), version=__version, uid=uuid_str) + token = '{system}|{version}|{uid}'.format(system=platform.system(), version=__version__, uid=uuid_str) try: open(mdb_file,'w').write(token) except:
diff --git a/integration_testing/run_tests.py b/integration_testing/run_tests.py --- a/integration_testing/run_tests.py +++ b/integration_testing/run_tests.py @@ -14,8 +14,8 @@ import mindsdb from mindsdb import CONST -types_that_fail = ['str','ascii'] -types_that_work = ['int','float','date','datetime','timestamp'] +types_that_fail = ['str'] +types_that_work = ['int','float','date','datetime','timestamp','ascii'] logger = None @@ -53,7 +53,8 @@ def test_timeseries(): logger.debug(f'Creating timeseries test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows') try: - features = generate_value_cols(['datetime','int','float','ascii','ascii'],data_len, separator, ts_hours * 3600) + # add ,'ascii' in the features list to re-implement the group by + features = generate_value_cols(['datetime','int','float', 'ascii'],data_len, separator, ts_hours * 3600) features[3] = list(map(lambda x: str(x[0]) if len(x) > 0 else 'Nrmm', features[3])) labels = [generate_labels_1(features, separator)] @@ -92,6 +93,7 @@ def test_timeseries(): # timeseries specific argsw ,order_by=feature_headers[0] ,window_size_seconds=ts_hours* 3600 * 1.5 + #,window_size=6 ,group_by = feature_headers[3] ) logger.info(f'--------------- Learning ran succesfully ---------------') @@ -114,6 +116,7 @@ def test_timeseries(): for row in results: expect_columns = [label_headers[0] ,label_headers[0] + '_confidence'] for col in expect_columns: + print(col, row[col]) if col not in row: logger.error(f'Prediction failed to return expected column: {col}') logger.debug('Got row: {}'.format(row)) @@ -367,7 +370,7 @@ def test_multilabel_prediction(): setup_testing_logger() -#test_one_label_prediction_wo_strings() -#test_timeseries() -#test_multilabel_prediction() +test_one_label_prediction_wo_strings() +test_timeseries() +test_multilabel_prediction() test_one_label_prediction() diff --git a/integration_testing/run_travis_tests.py b/integration_testing/run_travis_tests.py new file mode 100644 --- /dev/null +++ b/integration_testing/run_travis_tests.py @@ -0,0 +1,117 @@ +from data_generators import * +import traceback +import sys +import os +import itertools +import logging +from colorlog import ColoredFormatter +import time + +import mindsdb +from mindsdb import CONST + +types_that_work = ['int','float','date','datetime','timestamp','ascii'] + +logger = None + +def setup_testing_logger(): + global logger + formatter = ColoredFormatter( + "%(log_color)s%(message)s", + datefmt=None, + reset=True, + log_colors={ + 'DEBUG': 'black,bg_white', + 'INFO': 'blue,bg_white', + 'WARNING': 'orange,bg_white', + 'ERROR': 'red,bg_white', + 'CRITICAL': 'red,bg_white', + } + ) + + logger = logging.getLogger('mindsdb_integration_testing') + logger.handlers = [] + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging.DEBUG) + +def run_tests(): + logger.info('Starting one-label test') + separator = ',' + train_file_name = 'train_data.csv' + test_file_name = 'test_data.csv' + data_len = 8000 + + # Create the full dataset + logger.debug(f'Creating one-labe test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows') + + try: + features = generate_value_cols(types_that_work,data_len, separator) + labels = [generate_labels_2(features, separator)] + + feature_headers = list(map(lambda col: col[0], features)) + label_headers = list(map(lambda col: col[0], labels)) + + # Create the training 
dataset and save it to a file + columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features)) + columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels))) + columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers]) + + # Create the testing dataset and save it to a file + columns_test = list(map(lambda col: col[int(len(col)*3/4):], features)) + columns_to_file(columns_test, test_file_name, separator, headers=feature_headers) + logger.debug(f'Datasets generate and saved to files successfully') + except: + print(traceback.format_exc()) + logger.error(f'Failed to generate datasets !') + exit(1) + + # Train + mdb = None + try: + mdb = mindsdb.Predictor(name='test_one_label_prediction') + logger.debug(f'Succesfully create mindsdb Predictor') + except: + logger.error(f'Failed to create mindsdb Predictor') + exit(1) + + + try: + mdb.learn(from_data=train_file_name, to_predict=label_headers) + logger.info(f'--------------- Learning ran succesfully ---------------') + except: + print(traceback.format_exc()) + logger.error(f'Failed during the training !') + exit(1) + + # Predict + try: + mdb = mindsdb.Predictor(name='test_one_label_prediction') + logger.debug(f'Succesfully create mindsdb Predictor') + except: + print(traceback.format_exc()) + logger.error(f'Failed to create mindsdb Predictor') + exit(1) + + try: + results = mdb.predict(when_data=test_file_name) + for row in results: + expect_columns = [label_headers[0] ,label_headers[0] + '_confidence'] + for col in expect_columns: + if col not in row: + logger.error(f'Prediction failed to return expected column: {col}') + logger.debug('Got row: {}'.format(row)) + exit(1) + + logger.info(f'--------------- Predicting ran succesfully ---------------') + except: + print(traceback.format_exc()) + logger.error(f'Failed whilst predicting') + exit(1) + + logger.info('Travis CLI Tests ran succesfully !') + + +setup_testing_logger() +run_tests()
Feed back predict column into timeseries We should be feeding back the previous values of the `predict` column as a sequential column when making timeseries predictions. Consider doing this via the Ludwig model (label1, label2, label3, etc. as separate columns, rather than one large array), at least when using the Ludwig backend (a sketch of the idea follows below). This could also cause issues in certain predict cases, depending on how we train our model, so we should train the model to handle both the case when previous predictions are available and the case when they aren't, or provide an argument for it in the `Predictor` or in the `learn` interface.
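An illustrative sketch of the idea, much simplified relative to the actual Ludwig-backend translation (which also windows by elapsed time or sample count and handles group-by columns): build a `previous_<target>` sequence column holding the target values seen before each row, empty when no history is available:

```python
import pandas as pd

def add_previous_target(df, target, window=3):
    # For each row, collect up to `window` prior target values so the model
    # can train with the feedback present; at predict time the caller either
    # supplies real previous values or leaves the sequence empty.
    prev = []
    for i in range(len(df)):
        start = max(0, i - window)
        prev.append(df[target].iloc[start:i].tolist())
    df['previous_' + target] = prev
    return df

df = pd.DataFrame({'t': [1, 2, 3, 4], 'label': [10, 20, 30, 40]})
print(add_previous_target(df, 'label'))
# previous_label column: [], [10], [10, 20], [10, 20, 30]
```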
2019-04-04T21:04:46Z
[]
[]
mindsdb/mindsdb
156
mindsdb__mindsdb-156
[ "140" ]
8b1aafc0afbdee48b2629cf03e9a5e7d61d84fae
diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py --- a/mindsdb/__init__.py +++ b/mindsdb/__init__.py @@ -1,14 +1,11 @@ import sys -if sys.version_info < (3,3): - sys.exit('Sorry, For MindsDB Python < 3.3 is not supported') +if sys.version_info < (3,6): + sys.exit('Sorry, For MindsDB Python < 3.6 is not supported') from mindsdb.config import CONFIG import mindsdb.libs.constants.mindsdb as CONST from mindsdb.__about__ import __package_name__ as name, __version__ -from mindsdb.libs.data_types.data_source import DataSource -from mindsdb.libs.data_sources import * from mindsdb.libs.controllers.predictor import Predictor MindsDB = Predictor -MDS = DataSource # A Mindsdb Data Source diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -41,7 +41,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co other_col_names = [] for feature_def in model_definition['input_features']: - if feature_def['name'] not in self.transaction.persistent_model_metadata.model_group_by and feature_def['name'] not in previous_predict_col_names: + if feature_def['name'] not in self.transaction.lmd.model_group_by and feature_def['name'] not in previous_predict_col_names: feature_def['type'] = 'sequence' if feature_def['name'] not in timeseries_cols: other_col_names.append(feature_def['name']) @@ -50,13 +50,13 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co previous_predict_col_names.append(previous_predict_col_name) new_cols = {} - for col in [*other_col_names,*previous_predict_col_names,timeseries_col_name,*predict_col_names,*self.transaction.persistent_model_metadata.model_group_by]: + for col in [*other_col_names,*previous_predict_col_names,timeseries_col_name,*predict_col_names,*self.transaction.lmd.model_group_by]: new_cols[col] = [] nr_ele = len(df[timeseries_col_name]) - if self.transaction.persistent_model_metadata.window_size_seconds is not None: - window_size_seconds = self.transaction.persistent_model_metadata.window_size_seconds + if self.transaction.lmd.window_size_seconds is not None: + window_size_seconds = self.transaction.lmd.window_size_seconds i = 0 while i < nr_ele: current_window = 0 @@ -70,7 +70,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[col] = [] for col in predict_col_names: new_row[col] = df[col][i] - for col in self.transaction.persistent_model_metadata.model_group_by: + for col in self.transaction.lmd.model_group_by: new_row[col] = df[col][i] inverted_index_range = list(range(i)) @@ -101,11 +101,11 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[timeseries_col_name] = timeseries_row for col in new_row: - if col not in predict_col_names and col not in self.transaction.persistent_model_metadata.model_group_by: + if col not in predict_col_names and col not in self.transaction.lmd.model_group_by: new_row[col].reverse() new_cols[col].append(new_row[col]) else: - window_size_samples = self.transaction.persistent_model_metadata.window_size_samples + window_size_samples = self.transaction.lmd.window_size_samples i = 0 while i < nr_ele: new_row = {} @@ -118,7 +118,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[col] = [] for col in predict_col_names: new_row[col] = df[col][i] - for col in self.transaction.persistent_model_metadata.model_group_by: + for col in 
self.transaction.lmd.model_group_by: new_row[col] = df[col][i] inverted_index_range = list(range(i)) @@ -148,7 +148,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[timeseries_col_name] = timeseries_row for col in new_row: - if col not in predict_col_names and col not in self.transaction.persistent_model_metadata.model_group_by: + if col not in predict_col_names and col not in self.transaction.lmd.model_group_by: new_row[col].reverse() new_cols[col].append(new_row[col]) @@ -159,32 +159,32 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co def _create_ludwig_dataframe(self, mode): if mode == 'train': indexes = self.transaction.input_data.train_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns)] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns)] elif mode == 'predict': indexes = self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns) if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns) if col not in self.transaction.lmd.predict_columns] elif mode == 'validate': indexes = self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns)] #if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns)] #if col not in self.transaction.lmd.predict_columns] elif mode == 'test': indexes = self.transaction.input_data.test_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns)] #if col not in self.transaction.persistent_model_metadata.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns)] #if col not in self.transaction.lmd.predict_columns] else: raise Exception(f'Unknown mode specified: "{mode}"') model_definition = {'input_features': [], 'output_features': []} data = {} - if self.transaction.persistent_model_metadata.model_order_by is None: + if self.transaction.lmd.model_order_by is None: timeseries_cols = [] else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.persistent_model_metadata.model_order_by)) + timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd.model_order_by)) for ele in columns: col = ele[0] col_ind = ele[1] data[col] = [] - col_stats = self.transaction.persistent_model_metadata.column_stats[col] + col_stats = self.transaction.lmd.column_stats[col] data_subtype = col_stats['data_subtype'] ludwig_dtype = None @@ -211,7 +211,7 @@ def _create_ludwig_dataframe(self, mode): ludwig_dtype = 'category' elif data_subtype in (DATA_SUBTYPES.DATE): - if col not in self.transaction.persistent_model_metadata.predict_columns: + if col not in self.transaction.lmd.predict_columns: ludwig_dtype = 'date' else: ludwig_dtype = 'category' @@ -252,7 +252,7 @@ def _create_ludwig_dataframe(self, mode): elif ludwig_dtype == 'sequence': arr_str = self.transaction.input_data.data_array[row_ind][col_ind] - arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.persistent_model_metadata.column_stats[col]['separator']))) + 
arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.lmd.column_stats[col]['separator']))) data[col].append(arr) # Date isn't supported yet, so we hack around it @@ -306,7 +306,7 @@ def _create_ludwig_dataframe(self, mode): if custom_logic_continue: continue - if col not in self.transaction.persistent_model_metadata.predict_columns: + if col not in self.transaction.lmd.predict_columns: input_def = { 'name': col ,'type': ludwig_dtype @@ -348,10 +348,10 @@ def _create_ludwig_dataframe(self, mode): def train(self): training_dataframe, model_definition = self._create_ludwig_dataframe('train') - if self.transaction.persistent_model_metadata.model_order_by is None: + if self.transaction.lmd.model_order_by is None: timeseries_cols = [] else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.persistent_model_metadata.model_order_by)) + timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd.model_order_by)) if len(timeseries_cols) > 0: training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') @@ -360,25 +360,27 @@ def train(self): model = LudwigModel(model_definition) # Figure out how to pass `model_load_path` - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.persistent_model_metadata.model_name) + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd.model_name) - #model.model.weights_save_path.rstrip('/model_weights_progress') + '/model' - ludwig_model_savepath = Config.LOCALSTORE_PATH.rstrip('local_jsondb_store') + self.transaction.persistent_model_metadata.model_name + #model.model.weights_save_path.rstrip('/model_weights_progress') + '/model' + ludwig_model_savepath = Config.LOCALSTORE_PATH.rstrip('local_jsondb_store') + self.transaction.lmd.model_name - model.save(ludwig_model_savepath) - model.close() + model.save(ludwig_model_savepath) + model.close() - self.transaction.persistent_model_metadata.ludwig_data = {'ludwig_save_path': ludwig_model_savepath, 'model_definition': model_definition} + self.transaction.lmd.ludwig_data = {'ludwig_save_path': ludwig_model_savepath, 'model_definition': model_definition} def predict(self, mode='predict', ignore_columns=[]): predict_dataframe, model_definition = self._create_ludwig_dataframe(mode) - model_definition = self.transaction.persistent_model_metadata.ludwig_data['model_definition'] + model_definition = self.transaction.lmd.ludwig_data['model_definition'] - if self.transaction.persistent_model_metadata.model_order_by is None: + model = LudwigModel.load(self.transaction.lmd.ludwig_data['ludwig_save_path']) + + if self.transaction.lmd.model_order_by is None: timeseries_cols = [] else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.persistent_model_metadata.model_order_by)) + timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd.model_order_by)) if len(timeseries_cols) > 0: predict_dataframe, model_definition = self._translate_df_to_timeseries_format(predict_dataframe, model_definition, timeseries_cols) @@ -391,7 +393,7 @@ def predict(self, mode='predict', ignore_columns=[]): predict_dataframe[ignore_col + date_appendage] = [None] * len(predict_dataframe[ignore_col + date_appendage]) with disable_ludwig_output(): - model = LudwigModel.load(self.transaction.persistent_model_metadata.ludwig_data['ludwig_save_path']) + model = LudwigModel.load(self.transaction.lmd.ludwig_data['ludwig_save_path']) predictions = 
model.predict(data_df=predict_dataframe) for col_name in predictions: diff --git a/mindsdb/libs/constants/mysql.py b/mindsdb/libs/constants/mysql.py deleted file mode 100644 --- a/mindsdb/libs/constants/mysql.py +++ /dev/null @@ -1,888 +0,0 @@ -""" -******************************************************* - * Copyright (C) 2017 MindsDB Inc. <[email protected]> - * - * This file is part of MindsDB Server. - * - * MindsDB Server can not be copied and/or distributed without the express - * permission of MindsDB Inc - ******************************************************* -""" - -# CAPABILITIES -# As defined in : https://dev.mysql.com/doc/dev/mysql-server/8.0.0/group__group__cs__capabilities__flags.html - -MAX_PACKET_SIZE = 16777215 - -CLIENT_LONG_PASSWORD=1 -CLIENT_FOUND_ROWS=2 -CLIENT_LONG_FLAG=4 -CLIENT_CONNECT_WITH_DB=8 -CLIENT_NO_SCHEMA=16 -CLIENT_COMPRESS=32 -CLIENT_ODBC=64 -CLIENT_LOCAL_FILES=128 -CLIENT_IGNORE_SPACE=256 -CLIENT_PROTOCOL_41=512 -CLIENT_INTERACTIVE=1024 -CLIENT_SSL=2048 -CLIENT_IGNORE_SIGPIPE=4096 -CLIENT_TRANSACTIONS=8192 -CLIENT_RESERVED=16384 -CLIENT_RESERVED2=32768 -CLIENT_MULTI_STATEMENTS=1<<16 -CLIENT_MULTI_RESULTS=1<<17 -CLIENT_PS_MULTI_RESULTS=1<<18 -CLIENT_PLUGIN_AUTH=1<<19 -CLIENT_CONNECT_ATTRS=1<<20 -CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA=1<<21 -CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS=1<<22 -CLIENT_SESSION_TRACK=1<<23 -CLIENT_DEPRECATE_EOF=1<<24 -CLIENT_SSL_VERIFY_SERVER_CERT=1<<30 -CLIENT_REMEMBER_OPTIONS=1<<31 - -# SERVER STATUS - -SERVER_STATUS_IN_TRANS = 1 # A transaction is currently active -SERVER_STATUS_AUTOCOMMIT = 2 # Autocommit mode is set -SERVER_MORE_RESULTS_EXISTS = 8 # more results exists (more packet follow) -SERVER_QUERY_NO_GOOD_INDEX_USED = 16 -SERVER_QUERY_NO_INDEX_USED = 32 -SERVER_STATUS_CURSOR_EXISTS = 64 # when using COM_STMT_FETCH, indicate that current cursor still has result (deprecated) -SERVER_STATUS_LAST_ROW_SENT = 128 # when using COM_STMT_FETCH, indicate that current cursor has finished to send results (deprecated) -SERVER_STATUS_DB_DROPPED = 1<<8 # database has been dropped -SERVER_STATUS_NO_BACKSLASH_ESCAPES = 1<<9 #current escape mode is "no backslash escape" -SERVER_STATUS_METADATA_CHANGED = 1<<10 # A DDL change did have an impact on an existing PREPARE (an automatic reprepare has been executed) -SERVER_QUERY_WAS_SLOW = 1<<11 -SERVER_PS_OUT_PARAMs = 1<<12 # this resultset contain stored procedure output parameter -SERVER_STATUS_IN_TRANS_READONLY = 1<<13 # current transaction is a read-only transaction -SERVER_SESSION_STATE_CHANGED = 1<<14 # session state change. 
see Session change type for more information - -# COMMANDS - -COM_CHANGE_USER = int('0x11',0) -COM_DEBUG = int('0x0D',0) -COM_INIT_DB = int('0x02',0) -COM_PING = int('0x0e',0) -COM_PROCESS_KILL = int('0xC',0) -COM_QUERY = int('0x03',0) -COM_QUIT = int('0x01',0) -COM_RESET_CONNECTION = int('0x1f',0) -COM_SET_OPTION = int('0x1b',0) -COM_SHUTDOWN = int('0x0a',0) -COM_SLEEP = int('0x00',0) -COM_STATISTICS = int('0x09',0) - -# FIELD TYPES -MYSQL_TYPE_DECIMAL=0 -MYSQL_TYPE_TINY=1 -MYSQL_TYPE_SHORT=2 -MYSQL_TYPE_LONG=3 -MYSQL_TYPE_FLOAT=4 -MYSQL_TYPE_DOUBLE=5 -MYSQL_TYPE_NULL=6 -MYSQL_TYPE_TIMESTAMP=7 -MYSQL_TYPE_LONGLONG=8 -MYSQL_TYPE_INT24=9 -MYSQL_TYPE_DATE=10 -MYSQL_TYPE_TIME=11 -MYSQL_TYPE_DATETIME=12 -MYSQL_TYPE_YEAR=13 -MYSQL_TYPE_NEWDATE=14 -MYSQL_TYPE_VARCHAR=15 -MYSQL_TYPE_BIT=16 -MYSQL_TYPE_TIMESTAMP2=17 -MYSQL_TYPE_DATETIME2=18 -MYSQL_TYPE_TIME2=19 -MYSQL_TYPE_NEWDECIMAL=246 -MYSQL_TYPE_ENUM=247 -MYSQL_TYPE_SET=248 -MYSQL_TYPE_TINY_BLOB=249 -MYSQL_TYPE_MEDIUM_BLOB=250 -MYSQL_TYPE_LONG_BLOB=251 -MYSQL_TYPE_BLOB=252 -MYSQL_TYPE_VAR_STRING=253 -MYSQL_TYPE_STRING=254 -MYSQL_TYPE_GEOMETRY=255 - - -# HANDSHAKE - -DEFAULT_COALLITION_ID = 83 -SERVER_STATUS_AUTOCOMMIT = 2 -DEFAULT_CAPABILITIES = sum([ - CLIENT_LONG_PASSWORD - ,CLIENT_LONG_FLAG - #,CLIENT_CONNECT_WITH_DB - ,CLIENT_PROTOCOL_41 - ,CLIENT_TRANSACTIONS - ,CLIENT_FOUND_ROWS - ,CLIENT_MULTI_STATEMENTS - ,CLIENT_MULTI_RESULTS - ,CLIENT_LOCAL_FILES - ,CLIENT_CONNECT_ATTRS - ,CLIENT_PLUGIN_AUTH # TODO: Review this as it may be needed for enterprise - ]) - - - -FILLER_FOR_WIRESHARK_DUMP = 21 - - -# Datum lenenc encoding - -NULL_VALUE = b'\xFB' -ONE_BYTE_ENC = b'\xFA' -TWO_BYTE_ENC = b'\xFC' -THREE_BYTE_ENC = b'\xFD' -EIGHT_BYTE_ENC = b'\xFE' - -#ERROR CODES - -ER_OLD_TEMPORALS_UPGRADED = 1880 -ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730 -ER_ONLY_INTEGERS_ALLOWED = 1578 -ER_ONLY_ON_RANGE_LIST_PARTITION = 1512 -ER_OPEN_AS_READONLY = 1036 -ER_OPERAND_COLUMNS = 1241 -ER_OPTION_PREVENTS_STATEMENT = 1290 -ER_ORDER_WITH_PROC = 1386 -ER_OUT_OF_RESOURCES = 1041 -ER_OUT_OF_SORTMEMORY = 1038 -ER_OUTOFMEMORY = 1037 -ER_PARSE_ERROR = 1064 -ER_PART_STATE_ERROR = 1522 -ER_PARTITION_CLAUSE_ON_NONPARTITIONED = 1747 -ER_PARTITION_COLUMN_LIST_ERROR = 1653 -ER_PARTITION_CONST_DOMAIN_ERROR = 1563 -ER_PARTITION_ENTRY_ERROR = 1496 -ER_PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731 -ER_PARTITION_EXCHANGE_FOREIGN_KEY = 1740 -ER_PARTITION_EXCHANGE_PART_TABLE = 1732 -ER_PARTITION_EXCHANGE_TEMP_TABLE = 1733 -ER_PARTITION_FIELDS_TOO_LONG = 1660 -ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491 -ER_PARTITION_FUNCTION_FAILURE = 1521 -ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564 -ER_PARTITION_INSTEAD_OF_SUBPARTITION = 1734 -ER_PARTITION_MAXVALUE_ERROR = 1481 -ER_PARTITION_MERGE_ERROR = 1572 -ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505 -ER_PARTITION_NAME = 1633 -ER_PARTITION_NO_TEMPORARY = 1562 -ER_PARTITION_NOT_DEFINED_ERROR = 1498 -ER_PARTITION_REQUIRES_VALUES_ERROR = 1479 -ER_PARTITION_SUBPART_MIX_ERROR = 1483 -ER_PARTITION_SUBPARTITION_ERROR = 1482 -ER_PARTITION_WRONG_NO_PART_ERROR = 1484 -ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485 -ER_PARTITION_WRONG_VALUES_ERROR = 1480 -ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492 -ER_PASSWD_LENGTH = 1372 -ER_PASSWORD_ANONYMOUS_USER = 1131 -ER_PASSWORD_FORMAT = 1827 -ER_PASSWORD_NO_MATCH = 1133 -ER_PASSWORD_NOT_ALLOWED = 1132 -ER_PATH_LENGTH = 1680 -ER_PLUGIN_CANNOT_BE_UNINSTALLED = 1883 -ER_PLUGIN_IS_NOT_LOADED = 1524 -ER_PLUGIN_IS_PERMANENT = 1702 -ER_PLUGIN_NO_INSTALL = 1721 -ER_PLUGIN_NO_UNINSTALL = 1720 
-ER_PRIMARY_CANT_HAVE_NULL = 1171 -ER_PROC_AUTO_GRANT_FAIL = 1404 -ER_PROC_AUTO_REVOKE_FAIL = 1405 -ER_PROCACCESS_DENIED_ERROR = 1370 -ER_PS_MANY_PARAM = 1390 -ER_PS_NO_RECURSION = 1444 -ER_QUERY_CACHE_DISABLED = 1651 -ER_QUERY_INTERRUPTED = 1317 -ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430 -ER_QUERY_ON_MASTER = 1219 -ER_RANGE_NOT_INCREASING_ERROR = 1493 -ER_RBR_NOT_AVAILABLE = 1574 -ER_READ_ONLY_MODE = 1836 -ER_READ_ONLY_TRANSACTION = 1207 -ER_READY = 1076 -ER_RECORD_FILE_FULL = 1114 -ER_REGEXP_ERROR = 1139 -ER_RELAY_LOG_FAIL = 1371 -ER_RELAY_LOG_INIT = 1380 -ER_REMOVED_SPACES = 1466 -ER_RENAMED_NAME = 1636 -ER_REORG_HASH_ONLY_ON_SAME_N = 1510 -ER_REORG_NO_PARAM_ERROR = 1511 -ER_REORG_OUTSIDE_RANGE = 1520 -ER_REORG_PARTITION_NOT_EXIST = 1516 -ER_REQUIRES_PRIMARY_KEY = 1173 -ER_RESERVED_SYNTAX = 1382 -ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645 -ER_REVOKE_GRANTS = 1269 -ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748 -ER_ROW_DOES_NOT_MATCH_PARTITION = 1737 -ER_ROW_IN_WRONG_PARTITION = 1863 -ER_ROW_IS_REFERENCED = 1217 -ER_ROW_IS_REFERENCED_2 = 1451 -ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658 -ER_RPL_INFO_DATA_TOO_LONG = 1742 -ER_SAME_NAME_PARTITION = 1517 -ER_SAME_NAME_PARTITION_FIELD = 1652 -ER_SELECT_REDUCED = 1249 -ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275 -ER_SERVER_SHUTDOWN = 1053 -ER_SET_CONSTANTS_ONLY = 1204 -ER_SET_PASSWORD_AUTH_PLUGIN = 1699 -ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769 -ER_SHUTDOWN_COMPLETE = 1079 -ER_SIGNAL_BAD_CONDITION_TYPE = 1646 -ER_SIGNAL_EXCEPTION = 1644 -ER_SIGNAL_NOT_FOUND = 1643 -ER_SIGNAL_WARN = 1642 -ER_SIZE_OVERFLOW_ERROR = 1532 -ER_SKIPPING_LOGGED_TRANSACTION = 1771 -ER_SLAVE_CANT_CREATE_CONVERSION = 1678 -ER_SLAVE_CONFIGURATION = 1794 -ER_SLAVE_CONVERSION_FAILED = 1677 -ER_SLAVE_CORRUPT_EVENT = 1610 -ER_SLAVE_CREATE_EVENT_FAILURE = 1596 -ER_SLAVE_FATAL_ERROR = 1593 -ER_SLAVE_HAS_MORE_GTIDS_THAN_MASTER = 1885 -ER_SLAVE_HEARTBEAT_FAILURE = 1623 -ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624 -ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704 -ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703 -ER_SLAVE_IGNORE_SERVER_IDS = 1650 -ER_SLAVE_IGNORED_SSL_PARAMS = 1274 -ER_SLAVE_IGNORED_TABLE = 1237 -ER_SLAVE_INCIDENT = 1590 -ER_SLAVE_MASTER_COM_FAILURE = 1597 -ER_SLAVE_MI_INIT_REPOSITORY = 1871 -ER_SLAVE_MUST_STOP = 1198 -ER_SLAVE_NOT_RUNNING = 1199 -ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594 -ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595 -ER_SLAVE_RLI_INIT_REPOSITORY = 1872 -ER_SLAVE_SILENT_RETRY_TRANSACTION = 1806 -ER_SLAVE_THREAD = 1202 -ER_SLAVE_WAS_NOT_RUNNING = 1255 -ER_SLAVE_WAS_RUNNING = 1254 -ER_SP_ALREADY_EXISTS = 1304 -ER_SP_BAD_CURSOR_QUERY = 1322 -ER_SP_BAD_CURSOR_SELECT = 1323 -ER_SP_BAD_SQLSTATE = 1407 -ER_SP_BAD_VAR_SHADOW = 1453 -ER_SP_BADRETURN = 1313 -ER_SP_BADSELECT = 1312 -ER_SP_BADSTATEMENT = 1314 -ER_SP_CANT_ALTER = 1334 -ER_SP_CANT_SET_AUTOCOMMIT = 1445 -ER_SP_CASE_NOT_FOUND = 1339 -ER_SP_COND_MISMATCH = 1319 -ER_SP_CURSOR_AFTER_HANDLER = 1338 -ER_SP_CURSOR_ALREADY_OPEN = 1325 -ER_SP_CURSOR_MISMATCH = 1324 -ER_SP_CURSOR_NOT_OPEN = 1326 -ER_SP_DOES_NOT_EXIST = 1305 -ER_SP_DROP_FAILED = 1306 -ER_SP_DUP_COND = 1332 -ER_SP_DUP_CURS = 1333 -ER_SP_DUP_HANDLER = 1413 -ER_SP_DUP_PARAM = 1330 -ER_SP_DUP_VAR = 1331 -ER_SP_FETCH_NO_DATA = 1329 -ER_SP_GOTO_IN_HNDLR = 1358 -ER_SP_LABEL_MISMATCH = 1310 -ER_SP_LABEL_REDEFINE = 1309 -ER_SP_LILABEL_MISMATCH = 1308 -ER_SP_NO_AGGREGATE = 1460 -ER_SP_NO_DROP_SP = 1357 -ER_SP_NO_RECURSION = 1424 -ER_SP_NO_RECURSIVE_CREATE = 1303 -ER_SP_NO_RETSET = 1415 -ER_SP_NORETURN = 1320 -ER_SP_NORETURNEND = 1321 -ER_SP_NOT_VAR_ARG = 
1414 -ER_SP_PROC_TABLE_CORRUPT = 1457 -ER_SP_RECURSION_LIMIT = 1456 -ER_SP_STORE_FAILED = 1307 -ER_SP_SUBSELECT_NYI = 1335 -ER_SP_UNDECLARED_VAR = 1327 -ER_SP_UNINIT_VAR = 1311 -ER_SP_VARCOND_AFTER_CURSHNDLR = 1337 -ER_SP_WRONG_NAME = 1458 -ER_SP_WRONG_NO_OF_ARGS = 1318 -ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328 -ER_SPATIAL_CANT_HAVE_NULL = 1252 -ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687 -ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227 -ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858 -ER_SQLTHREAD_WITH_SECURE_SLAVE = 1763 -ER_SR_INVALID_CREATION_CTX = 1601 -ER_STACK_OVERRUN = 1119 -ER_STACK_OVERRUN_NEED_MORE = 1436 -ER_STARTUP = 1408 -ER_STMT_CACHE_FULL = 1705 -ER_STMT_HAS_NO_OPEN_CURSOR = 1421 -ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 -ER_STOP_SLAVE_IO_THREAD_TIMEOUT = 1876 -ER_STOP_SLAVE_SQL_THREAD_TIMEOUT = 1875 -ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560 -ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695 -ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686 -ER_SUBPARTITION_ERROR = 1500 -ER_SUBPARTITION_NAME = 1634 -ER_SUBQUERY_NO_1_ROW = 1242 -ER_SYNTAX_ERROR = 1149 -ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 -ER_TABLE_CANT_HANDLE_BLOB = 1163 -ER_TABLE_CANT_HANDLE_FT = 1214 -ER_TABLE_CANT_HANDLE_SPKEYS = 1464 -ER_TABLE_CORRUPT = 1877 -ER_TABLE_DEF_CHANGED = 1412 -ER_TABLE_EXISTS_ERROR = 1050 -ER_TABLE_HAS_NO_FT = 1764 -ER_TABLE_IN_FK_CHECK = 1725 -ER_TABLE_IN_SYSTEM_TABLESPACE = 1809 -ER_TABLE_MUST_HAVE_COLUMNS = 1113 -ER_TABLE_NAME = 1632 -ER_TABLE_NEEDS_REBUILD = 1707 -ER_TABLE_NEEDS_UPGRADE = 1459 -ER_TABLE_NOT_LOCKED = 1100 -ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099 -ER_TABLE_SCHEMA_MISMATCH = 1808 -ER_TABLEACCESS_DENIED_ERROR = 1142 -ER_TABLENAME_NOT_ALLOWED_HERE = 1250 -ER_TABLES_DIFFERENT_METADATA = 1736 -ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530 -ER_TABLESPACE_DISCARDED = 1814 -ER_TABLESPACE_EXISTS = 1813 -ER_TABLESPACE_MISSING = 1812 -ER_TEMP_FILE_WRITE_FAILURE = 1878 -ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559 -ER_TEMPORARY_NAME = 1635 -ER_TEXTFILE_NOT_READABLE = 1085 -ER_TOO_BIG_DISPLAYWIDTH = 1439 -ER_TOO_BIG_FIELDLENGTH = 1074 -ER_TOO_BIG_FOR_UNCOMPRESS = 1256 -ER_TOO_BIG_PRECISION = 1426 -ER_TOO_BIG_ROWSIZE = 1118 -ER_TOO_BIG_SCALE = 1425 -ER_TOO_BIG_SELECT = 1104 -ER_TOO_BIG_SET = 1097 -ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473 -ER_TOO_LONG_BODY = 1437 -ER_TOO_LONG_FIELD_COMMENT = 1629 -ER_TOO_LONG_IDENT = 1059 -ER_TOO_LONG_INDEX_COMMENT = 1688 -ER_TOO_LONG_KEY = 1071 -ER_TOO_LONG_STRING = 1162 -ER_TOO_LONG_TABLE_COMMENT = 1628 -ER_TOO_LONG_TABLE_PARTITION_COMMENT = 1793 -ER_TOO_MANY_CONCURRENT_TRXS = 1637 -ER_TOO_MANY_DELAYED_THREADS = 1151 -ER_TOO_MANY_FIELDS = 1117 -ER_TOO_MANY_KEY_PARTS = 1070 -ER_TOO_MANY_KEYS = 1069 -ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655 -ER_TOO_MANY_PARTITIONS_ERROR = 1499 -ER_TOO_MANY_ROWS = 1172 -ER_TOO_MANY_TABLES = 1116 -ER_TOO_MANY_USER_CONNECTIONS = 1203 -ER_TOO_MANY_VALUES_ERROR = 1657 -ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 -ER_TRANS_CACHE_FULL = 1197 -ER_TRG_ALREADY_EXISTS = 1359 -ER_TRG_CANT_CHANGE_ROW = 1362 -ER_TRG_CANT_OPEN_TABLE = 1606 -ER_TRG_CORRUPTED_FILE = 1602 -ER_TRG_DOES_NOT_EXIST = 1360 -ER_TRG_IN_WRONG_SCHEMA = 1435 -ER_TRG_INVALID_CREATION_CTX = 1604 -ER_TRG_NO_CREATION_CTX = 1603 -ER_TRG_NO_DEFINER = 1454 -ER_TRG_NO_SUCH_ROW_IN_TRG = 1363 -ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361 -ER_TRUNCATE_ILLEGAL_FK = 1701 -ER_TRUNCATED_WRONG_VALUE = 1292 -ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 -ER_UDF_EXISTS = 1125 -ER_UDF_NO_PATHS = 1124 -ER_UNDO_RECORD_TOO_BIG = 1713 -ER_UNEXPECTED_EOF = 1039 
-ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212 -ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503 -ER_UNKNOWN_ALTER_ALGORITHM = 1800 -ER_UNKNOWN_ALTER_LOCK = 1801 -ER_UNKNOWN_CHARACTER_SET = 1115 -ER_UNKNOWN_COLLATION = 1273 -ER_UNKNOWN_COM_ERROR = 1047 -ER_UNKNOWN_ERROR = 1105 -ER_UNKNOWN_EXPLAIN_FORMAT = 1791 -ER_UNKNOWN_KEY_CACHE = 1284 -ER_UNKNOWN_LOCALE = 1649 -ER_UNKNOWN_PARTITION = 1735 -ER_UNKNOWN_PROCEDURE = 1106 -ER_UNKNOWN_STMT_HANDLER = 1243 -ER_UNKNOWN_STORAGE_ENGINE = 1286 -ER_UNKNOWN_SYSTEM_VARIABLE = 1193 -ER_UNKNOWN_TABLE = 1109 -ER_UNKNOWN_TARGET_BINLOG = 1373 -ER_UNKNOWN_TIME_ZONE = 1298 -ER_UNSUPORTED_LOG_ENGINE = 1579 -ER_UNSUPPORTED_ENGINE = 1726 -ER_UNSUPPORTED_EXTENSION = 1112 -ER_UNSUPPORTED_PS = 1295 -ER_UNTIL_COND_IGNORED = 1279 -ER_UPDATE_INF = 1134 -ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315 -ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 -ER_UPDATE_TABLE_USED = 1093 -ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 -ER_USER_LIMIT_REACHED = 1226 -ER_USERNAME = 1468 -ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697 -ER_VAR_CANT_BE_READ = 1233 -ER_VARIABLE_IS_NOT_STRUCT = 1272 -ER_VARIABLE_IS_READONLY = 1621 -ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765 -ER_VARIABLE_NOT_SETTABLE_IN_SP = 1838 -ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766 -ER_VIEW_CHECK_FAILED = 1369 -ER_VIEW_CHECKSUM = 1392 -ER_VIEW_DELETE_MERGE_VIEW = 1395 -ER_VIEW_FRM_NO_USER = 1447 -ER_VIEW_INVALID = 1356 -ER_VIEW_INVALID_CREATION_CTX = 1600 -ER_VIEW_MULTIUPDATE = 1393 -ER_VIEW_NO_CREATION_CTX = 1599 -ER_VIEW_NO_EXPLAIN = 1345 -ER_VIEW_NO_INSERT_FIELD_LIST = 1394 -ER_VIEW_NONUPD_CHECK = 1368 -ER_VIEW_OTHER_USER = 1448 -ER_VIEW_PREVENT_UPDATE = 1443 -ER_VIEW_RECURSIVE = 1462 -ER_VIEW_SELECT_CLAUSE = 1350 -ER_VIEW_SELECT_DERIVED = 1349 -ER_VIEW_SELECT_TMPTABLE = 1352 -ER_VIEW_SELECT_VARIABLE = 1351 -ER_VIEW_WRONG_LIST = 1353 -ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301 -ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 -ER_WARN_DATA_OUT_OF_RANGE = 1264 -ER_WARN_DEPRECATED_SYNTAX = 1287 -ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681 -ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554 -ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622 -ER_WARN_FIELD_RESOLVED = 1276 -ER_WARN_HOSTNAME_WONT_WORK = 1285 -ER_WARN_I_S_SKIPPED_TABLE = 1684 -ER_WARN_INDEX_NOT_APPLICABLE = 1739 -ER_WARN_INVALID_TIMESTAMP = 1299 -ER_WARN_NULL_TO_NOTNULL = 1263 -ER_WARN_PURGE_LOG_IN_USE = 1867 -ER_WARN_PURGE_LOG_IS_ACTIVE = 1868 -ER_WARN_QC_RESIZE = 1282 -ER_WARN_TOO_FEW_RECORDS = 1261 -ER_WARN_TOO_MANY_RECORDS = 1262 -ER_WARN_USING_OTHER_HANDLER = 1266 -ER_WARN_VIEW_MERGE = 1354 -ER_WARN_VIEW_WITHOUT_KEY = 1355 -ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196 -ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751 -ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752 -ER_WRONG_ARGUMENTS = 1210 -ER_WRONG_AUTO_KEY = 1075 -ER_WRONG_COLUMN_NAME = 1166 -ER_WRONG_DB_NAME = 1102 -ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486 -ER_WRONG_FIELD_SPEC = 1063 -ER_WRONG_FIELD_TERMINATORS = 1083 -ER_WRONG_FIELD_WITH_GROUP = 1055 -ER_WRONG_FK_DEF = 1239 -ER_WRONG_GROUP_FIELD = 1056 -ER_WRONG_KEY_COLUMN = 1167 -ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428 -ER_WRONG_MAGIC = 1389 -ER_WRONG_MRG_TABLE = 1168 -ER_WRONG_NAME_FOR_CATALOG = 1281 -ER_WRONG_NAME_FOR_INDEX = 1280 -ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682 -ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 -ER_WRONG_OBJECT = 1347 -ER_WRONG_OUTER_JOIN = 1120 -ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582 -ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 -ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583 -ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108 
-ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584 -ER_WRONG_PARTITION_NAME = 1567 -ER_WRONG_PERFSCHEMA_USAGE = 1683 -ER_WRONG_SIZE_NUMBER = 1531 -ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691 -ER_WRONG_STRING_LENGTH = 1470 -ER_WRONG_SUB_KEY = 1089 -ER_WRONG_SUM_SELECT = 1057 -ER_WRONG_TABLE_NAME = 1103 -ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654 -ER_WRONG_TYPE_FOR_VAR = 1232 -ER_WRONG_USAGE = 1221 -ER_WRONG_VALUE = 1525 -ER_WRONG_VALUE_COUNT = 1058 -ER_WRONG_VALUE_COUNT_ON_ROW = 1136 -ER_WRONG_VALUE_FOR_TYPE = 1411 -ER_WRONG_VALUE_FOR_VAR = 1231 -ER_WSAS_FAILED = 1383 -ER_XA_RBDEADLOCK = 1614 -ER_XA_RBROLLBACK = 1402 -ER_XA_RBTIMEOUT = 1613 -ER_XAER_DUPID = 1440 -ER_XAER_INVAL = 1398 -ER_XAER_NOTA = 1397 -ER_XAER_OUTSIDE = 1400 -ER_XAER_RMERR = 1401 -ER_XAER_RMFAIL = 1399 -ER_YES = 1003 -ER_ZLIB_Z_BUF_ERROR = 1258 -ER_ZLIB_Z_DATA_ERROR = 1259 -ER_ZLIB_Z_MEM_ERROR = 1257 - - -WARN_COND_ITEM_TRUNCATED = 1647 -WARN_DATA_TRUNCATED = 1265 -WARN_NO_MASTER_INF = 1617 -WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638 -WARN_ON_BLOCKHOLE_IN_RBR = 1870 -WARN_OPTION_BELOW_LIMIT = 1708 -WARN_OPTION_IGNORED = 1618 -WARN_PLUGIN_BUSY = 1620 -WARN_PLUGIN_DELETE_BUILTIN = 1619 - -# CHARACTER SET NUMBERS - -CHARSET_NUMBERS = { - "big5_chinese_ci": 1, - "latin2_czech_cs": 2, - "dec8_swedish_ci": 3, - "cp850_general_ci": 4, - "latin1_german1_ci": 5, - "hp8_english_ci": 6, - "koi8r_general_ci": 7, - "latin1_swedish_ci": 8, - "latin2_general_ci": 9, - "swe7_swedish_ci": 10, - "ascii_general_ci": 11, - "ujis_japanese_ci": 12, - "sjis_japanese_ci": 13, - "cp1251_bulgarian_ci": 14, - "latin1_danish_ci": 15, - "hebrew_general_ci": 16, - "tis620_thai_ci": 18, - "euckr_korean_ci": 19, - "latin7_estonian_cs": 20, - "latin2_hungarian_ci": 21, - "koi8u_general_ci": 22, - "cp1251_ukrainian_ci": 23, - "gb2312_chinese_ci": 24, - "greek_general_ci": 25, - "cp1250_general_ci": 26, - "latin2_croatian_ci": 27, - "gbk_chinese_ci": 28, - "cp1257_lithuanian_ci": 29, - "latin5_turkish_ci": 30, - "latin1_german2_ci": 31, - "armscii8_general_ci": 32, - "utf8_general_ci": 33, - "cp1250_czech_cs": 34, - "ucs2_general_ci": 35, - "cp866_general_ci": 36, - "keybcs2_general_ci": 37, - "macce_general_ci": 38, - "macroman_general_ci": 39, - "cp852_general_ci": 40, - "latin7_general_ci": 41, - "latin7_general_cs": 42, - "macce_bin": 43, - "cp1250_croatian_ci": 44, - "utf8mb4_general_ci": 45, - "utf8mb4_bin": 46, - "latin1_bin": 47, - "latin1_general_ci": 48, - "latin1_general_cs": 49, - "cp1251_bin": 50, - "cp1251_general_ci": 51, - "cp1251_general_cs": 52, - "macroman_bin": 53, - "utf16_general_ci": 54, - "utf16_bin": 55, - "utf16le_general_ci": 56, - "cp1256_general_ci": 57, - "cp1257_bin": 58, - "cp1257_general_ci": 59, - "utf32_general_ci": 60, - "utf32_bin": 61, - "utf16le_bin": 62, - "binary": 63, - "armscii8_bin": 64, - "ascii_bin": 65, - "cp1250_bin": 66, - "cp1256_bin": 67, - "cp866_bin": 68, - "dec8_bin": 69, - "greek_bin": 70, - "hebrew_bin": 71, - "hp8_bin": 72, - "keybcs2_bin": 73, - "koi8r_bin": 74, - "koi8u_bin": 75, - "latin2_bin": 77, - "latin5_bin": 78, - "latin7_bin": 79, - "cp850_bin": 80, - "cp852_bin": 81, - "swe7_bin": 82, - "utf8_bin": 83, - "big5_bin": 84, - "euckr_bin": 85, - "gb2312_bin": 86, - "gbk_bin": 87, - "sjis_bin": 88, - "tis620_bin": 89, - "ucs2_bin": 90, - "ujis_bin": 91, - "geostd8_general_ci": 92, - "geostd8_bin": 93, - "latin1_spanish_ci": 94, - "cp932_japanese_ci": 95, - "cp932_bin": 96, - "eucjpms_japanese_ci": 97, - "eucjpms_bin": 98, - "cp1250_polish_ci": 99, - "utf16_unicode_ci": 101, - "utf16_icelandic_ci": 102, - 
"utf16_latvian_ci": 103, - "utf16_romanian_ci": 104, - "utf16_slovenian_ci": 105, - "utf16_polish_ci": 106, - "utf16_estonian_ci": 107, - "utf16_spanish_ci": 108, - "utf16_swedish_ci": 109, - "utf16_turkish_ci": 110, - "utf16_czech_ci": 111, - "utf16_danish_ci": 112, - "utf16_lithuanian_ci": 113, - "utf16_slovak_ci": 114, - "utf16_spanish2_ci": 115, - "utf16_roman_ci": 116, - "utf16_persian_ci": 117, - "utf16_esperanto_ci": 118, - "utf16_hungarian_ci": 119, - "utf16_sinhala_ci": 120, - "utf16_german2_ci": 121, - "utf16_croatian_ci": 122, - "utf16_unicode_520_ci": 123, - "utf16_vietnamese_ci": 124, - "ucs2_unicode_ci": 128, - "ucs2_icelandic_ci": 129, - "ucs2_latvian_ci": 130, - "ucs2_romanian_ci": 131, - "ucs2_slovenian_ci": 132, - "ucs2_polish_ci": 133, - "ucs2_estonian_ci": 134, - "ucs2_spanish_ci": 135, - "ucs2_swedish_ci": 136, - "ucs2_turkish_ci": 137, - "ucs2_czech_ci": 138, - "ucs2_danish_ci": 139, - "ucs2_lithuanian_ci": 140, - "ucs2_slovak_ci": 141, - "ucs2_spanish2_ci": 142, - "ucs2_roman_ci": 143, - "ucs2_persian_ci": 144, - "ucs2_esperanto_ci": 145, - "ucs2_hungarian_ci": 146, - "ucs2_sinhala_ci": 147, - "ucs2_german2_ci": 148, - "ucs2_croatian_ci": 149, - "ucs2_unicode_520_ci": 150, - "ucs2_vietnamese_ci": 151, - "ucs2_general_mysql500_ci": 159, - "utf32_unicode_ci": 160, - "utf32_icelandic_ci": 161, - "utf32_latvian_ci": 162, - "utf32_romanian_ci": 163, - "utf32_slovenian_ci": 164, - "utf32_polish_ci": 165, - "utf32_estonian_ci": 166, - "utf32_spanish_ci": 167, - "utf32_swedish_ci": 168, - "utf32_turkish_ci": 169, - "utf32_czech_ci": 170, - "utf32_danish_ci": 171, - "utf32_lithuanian_ci": 172, - "utf32_slovak_ci": 173, - "utf32_spanish2_ci": 174, - "utf32_roman_ci": 175, - "utf32_persian_ci": 176, - "utf32_esperanto_ci": 177, - "utf32_hungarian_ci": 178, - "utf32_sinhala_ci": 179, - "utf32_german2_ci": 180, - "utf32_croatian_ci": 181, - "utf32_unicode_520_ci": 182, - "utf32_vietnamese_ci": 183, - "utf8_unicode_ci": 192, - "utf8_icelandic_ci": 193, - "utf8_latvian_ci": 194, - "utf8_romanian_ci": 195, - "utf8_slovenian_ci": 196, - "utf8_polish_ci": 197, - "utf8_estonian_ci": 198, - "utf8_spanish_ci": 199, - "utf8_swedish_ci": 200, - "utf8_turkish_ci": 201, - "utf8_czech_ci": 202, - "utf8_danish_ci": 203, - "utf8_lithuanian_ci": 204, - "utf8_slovak_ci": 205, - "utf8_spanish2_ci": 206, - "utf8_roman_ci": 207, - "utf8_persian_ci": 208, - "utf8_esperanto_ci": 209, - "utf8_hungarian_ci": 210, - "utf8_sinhala_ci": 211, - "utf8_german2_ci": 212, - "utf8_croatian_ci": 213, - "utf8_unicode_520_ci": 214, - "utf8_vietnamese_ci": 215, - "utf8_general_mysql500_ci": 223, - "utf8mb4_unicode_ci": 224, - "utf8mb4_icelandic_ci": 225, - "utf8mb4_latvian_ci": 226, - "utf8mb4_romanian_ci": 227, - "utf8mb4_slovenian_ci": 228, - "utf8mb4_polish_ci": 229, - "utf8mb4_estonian_ci": 230, - "utf8mb4_spanish_ci": 231, - "utf8mb4_swedish_ci": 232, - "utf8mb4_turkish_ci": 233, - "utf8mb4_czech_ci": 234, - "utf8mb4_danish_ci": 235, - "utf8mb4_lithuanian_ci": 236, - "utf8mb4_slovak_ci": 237, - "utf8mb4_spanish2_ci": 238, - "utf8mb4_roman_ci": 239, - "utf8mb4_persian_ci": 240, - "utf8mb4_esperanto_ci": 241, - "utf8mb4_hungarian_ci": 242, - "utf8mb4_sinhala_ci": 243, - "utf8mb4_german2_ci": 244, - "utf8mb4_croatian_ci": 245, - "utf8mb4_unicode_520_ci": 246, - "utf8mb4_vietnamese_ci": 247 -} - - -SQL_RESERVED_WORDS = [ - "ALL", - "ANALYSE", - "ANALYZE", - "AND", - "ANY", - "AS", - "ASC", - "AUTHORIZATION", - "BETWEEN", - "BINARY", - "BOTH", - "CASE", - "CAST", - "CHECK", - "COLLATE", - "COLUMN", - "CONSTRAINT", - 
"CREATE", - "CROSS", - "CURRENT_DATE", - "CURRENT_TIME", - "CURRENT_TIMESTAMP", - "CURRENT_USER", - "DEFAULT", - "DEFERRABLE", - "DESC", - "DISTINCT", - "DO", - "ELSE", - "END", - "EXCEPT", - "FALSE", - "FOR", - "FOREIGN", - "FREEZE", - "FROM", - "FULL", - "GRANT", - "GROUP", - "HAVING", - "ILIKE", - "IN", - "INITIALLY", - "INNER", - "INTERSECT", - "INTO", - "IS", - "ISNULL", - "JOIN", - "LEADING", - "LEFT", - "LIKE", - "LIMIT", - "LOCALTIME", - "LOCALTIMESTAMP", - "NATURAL", - "NEW", - "NOT", - "NOTNULL", - "NULL", - "OFF", - "OFFSET", - "OLD", - "ON", - "ONLY", - "OR", - "ORDER", - "OUTER", - "OVERLAPS", - "PLACING", - "PRIMARY", - "REFERENCES", - "RIGHT", - "SELECT", - "SESSION_USER", - "SIMILAR", - "SOME", - "TABLE", - "THEN", - "TO", - "TRAILING", - "TRUE", - "UNION", - "UNIQUE", - "USER", - "USING", - "VERBOSE", - "WHEN", - "WHERE" -] - -ALL = vars() - -def VAR_NAME(val, prefix=''): - global ALL - - for key in ALL.keys(): - value = ALL[key] - if value == val and key != 'val': - if prefix == '' or (prefix != '' and prefix == key[:len(prefix)]): - return key - return None diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -10,7 +10,8 @@ from mindsdb.libs.helpers.general_helpers import check_for_updates from mindsdb.config import CONFIG -from mindsdb.libs.data_entities.persistent_model_metadata import PersistentModelMetadata +from mindsdb.libs.data_types.light_model_metadata import LightModelMetadata +from mindsdb.libs.data_types.heavy_model_metadata import HeavyModelMetadata from mindsdb.libs.controllers.transaction import Transaction from mindsdb.libs.constants.mindsdb import * @@ -154,8 +155,10 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No else: self.log.warning('Note that after version 1.0, the default value for argument rename_strange_columns in MindsDB().learn, will be flipped from True to False, this means that if your data has columns with special characters, MindsDB will not try to rename them by default.') - transaction_metadata = PersistentModelMetadata() + transaction_metadata = LightModelMetadata() + heavy_transaction_metadata = HeavyModelMetadata() transaction_metadata.model_name = self.name + heavy_transaction_metadata.model_name = self.name transaction_metadata.model_backend = backend transaction_metadata.predict_columns = predict_columns transaction_metadata.model_columns_map = {} if rename_strange_columns else from_ds._col_map @@ -173,7 +176,7 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No transaction_metadata.stop_training_in_x_seconds = stop_training_in_x_seconds transaction_metadata.stop_training_in_accuracy = stop_training_in_accuracy - Transaction(session=self, transaction_metadata=transaction_metadata, logger=self.log, breakpoint=breakpoint) + Transaction(session=self, transaction_metadata=transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log, breakpoint=breakpoint) def predict(self, when={}, when_data = None, update_cached_model = False): @@ -191,8 +194,10 @@ def predict(self, when={}, when_data = None, update_cached_model = False): breakpoint = CONFIG.DEBUG_BREAK_POINT when_ds = None if when_data is None else getDS(when_data) - transaction_metadata = PersistentModelMetadata() + transaction_metadata = LightModelMetadata() + heavy_transaction_metadata = HeavyModelMetadata() transaction_metadata.model_name = self.name + 
heavy_transaction_metadata.model_name = self.name if update_cached_model: self.predict_worker = None @@ -204,6 +209,6 @@ def predict(self, when={}, when_data = None, update_cached_model = False): transaction_metadata.type = transaction_type transaction_metadata.when_data = when_ds - transaction = Transaction(session=self, transaction_metadata=transaction_metadata, breakpoint=breakpoint) + transaction = Transaction(session=self, transaction_metadata=transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, breakpoint=breakpoint) return transaction.output_data diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -1,25 +1,24 @@ +from mindsdb.libs.helpers.general_helpers import pickle_obj, unpickle_obj from mindsdb.libs.constants.mindsdb import * from mindsdb.libs.helpers.general_helpers import * -from mindsdb.libs.data_entities.persistent_model_metadata import PersistentModelMetadata +from mindsdb.libs.data_types.light_model_metadata import LightModelMetadata from mindsdb.libs.data_types.transaction_data import TransactionData from mindsdb.libs.data_types.transaction_output_data import PredictTransactionOutputData, TrainTransactionOutputData -from mindsdb.libs.data_types.model_data import ModelData from mindsdb.libs.data_types.mindsdb_logger import log from mindsdb.libs.backends.ludwig import LudwigBackend from mindsdb.libs.model_examination.probabilistic_validator import ProbabilisticValidator from mindsdb.config import CONFIG -from mindsdb.libs.helpers.general_helpers import unpickle_obj import time import _thread import traceback import importlib import copy - +import pickle class Transaction: - def __init__(self, session, transaction_metadata, logger = log, breakpoint = PHASE_END): + def __init__(self, session, transaction_metadata, heavy_transaction_metadata, logger = log, breakpoint = PHASE_END): """ A transaction is the interface to start some MindsDB operation within a session @@ -27,14 +26,15 @@ def __init__(self, session, transaction_metadata, logger = log, breakpoint = PH :type session: utils.controllers.session_controller.SessionController :param transaction_type: :param transaction_metadata: - :type transaction_metadata: PersistentModelMetadata + :type transaction_metadata: LightModelMetadata :param breakpoint: """ self.breakpoint = breakpoint self.session = session - self.persistent_model_metadata = transaction_metadata #type: PersistentModelMetadata + self.lmd = transaction_metadata #type: LightModelMetadata + self.hmd = heavy_transaction_metadata # variables to de defined by setup self.error = None @@ -42,7 +42,6 @@ def __init__(self, session, transaction_metadata, logger = log, breakpoint = PH self.input_data = TransactionData() self.output_data = TrainTransactionOutputData() - self.model_data = ModelData() # variables that can be persisted @@ -87,32 +86,35 @@ def _execute_learn(self): return try: - self.persistent_model_metadata.delete() - # start populating data - self.persistent_model_metadata.current_phase = MODEL_STATUS_ANALYZING - self.persistent_model_metadata.columns = self.input_data.columns # this is populated by data extractor + self.lmd.current_phase = MODEL_STATUS_ANALYZING + self.lmd.columns = self.input_data.columns # this is populated by data extractor self._call_phase_module('StatsGenerator') - self.persistent_model_metadata.current_phase = MODEL_STATUS_TRAINING + self.lmd.current_phase = 
MODEL_STATUS_TRAINING - if self.persistent_model_metadata.model_backend == 'ludwig': + if self.lmd.model_backend == 'ludwig': self.model_backend = LudwigBackend(self) self.model_backend.train() self._call_phase_module('ModelAnalyzer') # @STARTFIX Null out some non jsonable columns, temporary - self.persistent_model_metadata.from_data = None - self.persistent_model_metadata.test_from_data = None + self.lmd.from_data = None + self.lmd.test_from_data = None # @ENDFIX - self.persistent_model_metadata.insert() - self.persistent_model_metadata.update() + + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd.model_name + '_light_model_metadata.pickle', 'wb') as fp: + pickle.dump(self.lmd, fp) + + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd.model_name + '_heavy_model_metadata.pickle', 'wb') as fp: + pickle.dump(self.hmd, fp) return + except Exception as e: - self.persistent_model_metadata.current_phase = MODEL_STATUS_ERROR - self.persistent_model_metadata.error_msg = traceback.print_exc() + self.lmd.current_phase = MODEL_STATUS_ERROR + self.lmd.error_msg = traceback.print_exc() self.log.error(str(e)) raise e @@ -124,10 +126,8 @@ def _execute_drop_model(self): :return: """ - self.persistent_model_metadata.delete() - self.persistent_model_stats.delete() - self.output_data.data_array = [['Model '+self.persistent_model_metadata.model_name+' deleted.']] + self.output_data.data_array = [['Model '+self.lmd.model_name+' deleted.']] self.output_data.columns = ['Status'] return @@ -139,16 +139,30 @@ def _execute_predict(self): :return: """ - old_pmd = {} - for k in self.persistent_model_metadata.__dict__.keys(): - old_pmd[k] = self.persistent_model_metadata.__dict__[k] + old_lmd = {} + for k in self.lmd.__dict__.keys(): + old_lmd[k] = self.lmd.__dict__[k] + + old_hmd = {} + for k in old_hmd: + if old_hmd[k] is not None: + self.hmd.__dict__[k] = old_hmd[k] + + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd.model_name + '_light_model_metadata.pickle', 'rb') as fp: + self.lmd = pickle.load(fp) + + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd.model_name + '_heavy_model_metadata.pickle', 'rb') as fp: + self.hmd = pickle.load(fp) + + for k in old_lmd: + if old_lmd[k] is not None: + self.lmd.__dict__[k] = old_lmd[k] - self.persistent_model_metadata = self.persistent_model_metadata.find_one(self.persistent_model_metadata.getPkey()) - for k in old_pmd: - if old_pmd[k] is not None: - self.persistent_model_metadata.__dict__[k] = old_pmd[k] + for k in old_hmd: + if old_hmd[k] is not None: + self.hmd.__dict__[k] = old_hmd[k] - if self.persistent_model_metadata is None: + if self.lmd is None: self.log.error('No metadata found for this model') return @@ -160,21 +174,21 @@ def _execute_predict(self): self.output_data = PredictTransactionOutputData(transaction=self) - if self.persistent_model_metadata.model_backend == 'ludwig': + if self.lmd.model_backend == 'ludwig': self.model_backend = LudwigBackend(self) predictions = self.model_backend.predict() - # self.transaction.persistent_model_metadata.predict_columns + # self.transaction.lmd.predict_columns self.output_data.data = {col: [] for i, col in enumerate(self.input_data.columns)} - input_columns = [col for col in self.input_data.columns if col not in self.persistent_model_metadata.predict_columns] + input_columns = [col for col in self.input_data.columns if col not in self.lmd.predict_columns] for row in self.input_data.data_array: for index, cell in enumerate(row): col = self.input_data.columns[index] 
self.output_data.data[col].append(cell) - for predicted_col in self.persistent_model_metadata.predict_columns: - probabilistic_validator = unpickle_obj(self.persistent_model_metadata.probabilistic_validators[predicted_col]) + for predicted_col in self.lmd.predict_columns: + probabilistic_validator = unpickle_obj(self.hmd.probabilistic_validators[predicted_col]) predicted_values = predictions[predicted_col] self.output_data.data[predicted_col] = predicted_values @@ -198,18 +212,18 @@ def run(self): :return: """ - if self.persistent_model_metadata.type == TRANSACTION_BAD_QUERY: + if self.lmd.type == TRANSACTION_BAD_QUERY: self.log.error(self.errorMsg) self.error = True return - if self.persistent_model_metadata.type == TRANSACTION_DROP_MODEL: + if self.lmd.type == TRANSACTION_DROP_MODEL: self._execute_drop_model() return - if self.persistent_model_metadata.type == TRANSACTION_LEARN: - self.output_data.data_array = [['Model ' + self.persistent_model_metadata.model_name + ' training.']] + if self.lmd.type == TRANSACTION_LEARN: + self.output_data.data_array = [['Model ' + self.lmd.model_name + ' training.']] self.output_data.columns = ['Status'] if CONFIG.EXEC_LEARN_IN_THREAD == False: @@ -218,7 +232,7 @@ def run(self): _thread.start_new_thread(self._execute_learn, ()) return - elif self.persistent_model_metadata.type == TRANSACTION_PREDICT: + elif self.lmd.type == TRANSACTION_PREDICT: self._execute_predict() - elif self.persistent_model_metadata.type == TRANSACTION_NORMAL_SELECT: + elif self.lmd.type == TRANSACTION_NORMAL_SELECT: self._execute_normal_select() diff --git a/mindsdb/libs/data_entities/__init__.py b/mindsdb/libs/data_entities/__init__.py deleted file mode 100644 diff --git a/mindsdb/libs/data_sources/__init__.py b/mindsdb/libs/data_sources/__init__.py deleted file mode 100644 --- a/mindsdb/libs/data_sources/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from mindsdb.libs.data_sources.array_to_cols_ds import ArrayToColsDS -from mindsdb.libs.data_sources.file_ds import FileDS -from mindsdb.libs.data_sources.window_ds import WindowDS diff --git a/mindsdb/libs/data_sources/array_to_cols_ds.py b/mindsdb/libs/data_sources/array_to_cols_ds.py deleted file mode 100644 --- a/mindsdb/libs/data_sources/array_to_cols_ds.py +++ /dev/null @@ -1,46 +0,0 @@ -import pandas -import json - -from mindsdb.libs.data_types.data_source import DataSource - - - -class ArrayToColsDS(DataSource): - - def _setup(self, df, cols_to_split = {}): - - header = list(df.columns.values) - data = df.values.tolist() - - new_data = [] - new_header = [] - - for row in data: - n_row = [] - for i, col in enumerate(header): - cell = row[i] - if col in cols_to_split: - ncols = cols_to_split[col] - if cell is None: - cells = [None]*ncols - else: - cells = json.loads(cell) - - n_row += cells - else: - n_row += [cell] - - new_data += [n_row] - - for col in header: - - if col in cols_to_split: - ncols = cols_to_split[col] - - for i in range(ncols): - new_header += ['{col}_{num}'.format(col=col.replace('_agg',''), num=i)] - else: - new_header += [col] - - self.setDF(pandas.DataFrame(new_data, columns=new_header)) - diff --git a/mindsdb/libs/data_sources/file_ds.py b/mindsdb/libs/data_sources/file_ds.py --- a/mindsdb/libs/data_sources/file_ds.py +++ b/mindsdb/libs/data_sources/file_ds.py @@ -57,7 +57,6 @@ def cleanRow(self, row): def _getDataIo(self, file): """ This gets a file either url or local file and defiens what the format is as well as dialect - :param file: file path or url :return: data_io, format, dialect """ @@ -180,7 
+179,6 @@ def _setup(self,file, clean_header = True, clean_rows = True, custom_parser = No :param clean_header: if you want to clean header column names :param clean_rows: if you want to clean rows for strange null values :param custom_parser: if you want to parse the file with some custom parser - """ # get file data io, format and dialect diff --git a/mindsdb/libs/data_sources/window_ds.py b/mindsdb/libs/data_sources/window_ds.py deleted file mode 100644 --- a/mindsdb/libs/data_sources/window_ds.py +++ /dev/null @@ -1,38 +0,0 @@ -import pandas - - -from mindsdb.libs.data_types.data_source import DataSource - - - - -class WindowDS(DataSource): - - def _setup(self, df, col_max, col_min, window_size_samples=300, step_size=30, min_size = 100): - - header = list(df.columns.values) - data = df.values.tolist() - - max_index = header.index(col_max) - min_index = header.index(col_min) - - ret = [] - - for row in data: - max = row[max_index] - min = row[min_index] - - new_max = max - new_min = max - window_size_samples - - while new_max-min > min_size: - - row[max_index] = new_max - row[min_index] = new_min - - ret += [row.copy()] - - new_max = new_max - step_size - new_min = new_max - window_size_samples if new_max - window_size_samples > min else min - - self.setDF(pandas.DataFrame(ret, columns=header)) diff --git a/mindsdb/libs/data_types/batch.py b/mindsdb/libs/data_types/batch.py deleted file mode 100644 --- a/mindsdb/libs/data_types/batch.py +++ /dev/null @@ -1,186 +0,0 @@ -""" -******************************************************* - * Copyright (C) 2017 MindsDB Inc. <[email protected]> - * - * This file is part of MindsDB Server. - * - * MindsDB Server can not be copied and/or distributed without the express - * permission of MindsDB Inc - ******************************************************* -""" - -from mindsdb.libs.constants.mindsdb import * -import numpy as np -import torch -from mindsdb.libs.data_types.mindsdb_logger import log -import traceback - -class Batch: - def __init__(self, sampler, data_dict, mirror = False, group=None, column=None, start=None, end=None, blank_columns=[]): - """ - - :param sampler: The object generating batches - :type sampler: libs.data_types.sampler.Sampler - :param data_dict: the actual data - :param mirror: if you want input and target to be the same - """ - self.data_dict = data_dict - self.sampler = sampler - self.mirror = mirror - self.number_of_rows = None - self.blank_columns = blank_columns - - # these are pointers to trace it back to the original data - self.group_pointer = group - self.column_pointer = column - self.start_pointer = start - self.end_pointer = end - - # some usefule variables, they can be obtained from sambpler and metada but its a pain, so we keep them here - self.target_column_names = self.sampler.meta_data.predict_columns - self.input_column_names = [col for col in self.sampler.model_columns if col not in self.target_column_names] - - # This is the template for the response - ret = {'input':{}, 'target':{}} - - # this may change as we have more targets - # TODO: if the target is * how do we go about it here - # Ideas: iterate over the missing column - - # populate the ret dictionary, - # where there are inputs and targets and each is a dictionary - # with key name being the column and the value the column data - for col in self.sampler.model_columns: - # this is to populate always in same order - if col not in self.data_dict: - continue - - if self.mirror: - ret['target'][col] = self.data_dict[col] - ret['input'][col] = 
self.data_dict[col] - - elif col in self.sampler.meta_data.predict_columns: - ret['target'][col] = self.data_dict[col] - else: - ret['input'][col] = self.data_dict[col] - - if self.number_of_rows is None: - self.number_of_rows = self.data_dict[col][0].size - - - self.xy = ret - - return - - def getColumn(self, what, col, by_buckets = False): - if by_buckets and self.sampler.stats[col]['data_type']==DATA_TYPES.NUMERIC: - #col_name = EXTENSION_COLUMNS_TEMPLATE.format(column_name=col) - col_name = col - if col_name in self.data_dict: - return self.data_dict[col_name] - else: - raise Exception('No extension column {col}'.format(col=col_name)) - - else: - return self.xy[what][col] - if col in self.blank_columns: - return np.zeros_like(ret) - - def get(self, what, flatten = True, by_buckets = False): - ret = None - if flatten: - # make sure we serialize in the same order that input metadata columns - for col in self.sampler.model_columns: - if col not in self.xy[what]: - continue - - # do not include full text as its a variable length tensor, which we cannot wrap - if self.sampler.stats[col]['data_type'] == DATA_TYPES.SEQUENTIAL: - continue - - # make sure that this is always in the same order, use a list or make xw[what] an ordered dictionary - if ret is None: - ret = self.getColumn(what,col, by_buckets) - else: - ret = np.concatenate((ret, self.getColumn(what,col, by_buckets)), axis=1) - - if self.sampler.variable_wrapper is not None: - return self.sampler.variable_wrapper(ret) - else: - return ret - - if self.sampler.variable_wrapper is not None: - ret = {} - for col in self.xy[what]: - if self.sampler.stats[col]['data_type'] == DATA_TYPES.SEQUENTIAL: - continue - #try: - ret[col] = self.sampler.variable_wrapper(self.getColumn(what,col, by_buckets)) - #except: - # log.error(traceback.format_exc()) - # raise ValueError('Could not decode column {what}:{col}'.format(what=what, col=col)) - return ret - else: - return self.xy[what] - - def getFullTextInput(self): - """ - TODO: mOVE THE WRAPPER function to the model, so we can keep this batch framework agnostic - :return: - """ - what = 'input' - ret = {} - for col in self.sampler.model_columns: - if col not in self.xy[what] or self.sampler.stats[col]['data_type'] != DATA_TYPES.SEQUENTIAL: - continue - - ret[col] = [torch.tensor(row, dtype=torch.long).view(-1, 1) for row in self.getColumn(what, col)] - - - - return ret - - - - def getInput(self, flatten = True): - return self.get('input', flatten) - - - def getTarget(self, flatten = True, by_buckets = False): - return self.get('target', flatten, by_buckets) - - def deflatTarget(self, flat_vector): - ret = {} - start = 0 - for col in self.sampler.model_columns: - # this is to populate always in same order - - if col in self.sampler.meta_data.predict_columns: - end = self.data_dict[col].shape[1] # get when it ends - ret[col] = flat_vector[:,start:end] - start = end - - return ret - - def getTargetStats(self): - - stats = {} - - for col in self.sampler.meta_data.predict_columns: - stats[col] = self.sampler.stats[col] - - return stats - - def getInputStats(self): - - stats = {} - - for col in self.sampler.model_columns: - if col not in self.sampler.meta_data.predict_columns: - stats[col] = self.sampler.stats[col] - - return stats - - - def size(self): - return self.number_of_rows diff --git a/mindsdb/libs/data_types/file_saved_response.py b/mindsdb/libs/data_types/file_saved_response.py deleted file mode 100644 --- a/mindsdb/libs/data_types/file_saved_response.py +++ /dev/null @@ -1,5 +0,0 @@ -class 
FileSavedResponse: - - def __init__(self, file_id, path): - self.file_id = file_id - self.path = path \ No newline at end of file diff --git a/mindsdb/libs/data_types/heavy_model_metadata.py b/mindsdb/libs/data_types/heavy_model_metadata.py new file mode 100644 --- /dev/null +++ b/mindsdb/libs/data_types/heavy_model_metadata.py @@ -0,0 +1,9 @@ +class HeavyModelMetadata(): + + _entity_name = 'heavy_model_metadata' + _pkey = ['model_name'] + + def setup(self): + + self.probabilistic_validators = None + self.model_name = None diff --git a/mindsdb/libs/data_entities/persistent_model_metadata.py b/mindsdb/libs/data_types/light_model_metadata.py similarity index 86% rename from mindsdb/libs/data_entities/persistent_model_metadata.py rename to mindsdb/libs/data_types/light_model_metadata.py --- a/mindsdb/libs/data_entities/persistent_model_metadata.py +++ b/mindsdb/libs/data_types/light_model_metadata.py @@ -1,6 +1,4 @@ -from mindsdb.libs.data_types.persistent_object import PersistentObject - -class PersistentModelMetadata(PersistentObject): +class LightModelMetadata(): _entity_name = 'model_metadata' _pkey = ['model_name'] @@ -31,8 +29,6 @@ def setup(self): self.stop_training = False self.kill_training = False - self.probabilistic_validators = None - self.ludwig_data = None self.column_importances = None diff --git a/mindsdb/libs/data_types/model_data.py b/mindsdb/libs/data_types/model_data.py deleted file mode 100644 --- a/mindsdb/libs/data_types/model_data.py +++ /dev/null @@ -1,13 +0,0 @@ - -class ModelData(): - - def __init__(self): - - self.train_set = {} - self.test_set = {} - self.validation_set = {} - - self.predict_set = {} - - self.predict_set_map = {} - diff --git a/mindsdb/libs/data_types/object_dict.py b/mindsdb/libs/data_types/object_dict.py deleted file mode 100644 --- a/mindsdb/libs/data_types/object_dict.py +++ /dev/null @@ -1,24 +0,0 @@ -from mindsdb.libs.data_types.mindsdb_logger import log - -class ObjectDict(): - - def getAsDict(self): - if hasattr(self, '_ignore_keys'): - ignore_keys = self._ignore_keys - else: - ignore_keys = [] - ret = {key:self.__dict__[key] for key in self.__dict__ if key[0] != '_' and key not in ignore_keys} - return ret - - def setFromDict(self, dict): - """ - This tries to populate object from a dictionary - :param dict: dict - :return: None - """ - for key in dict: - if key in self.__dict__: - self.__setattr__(key, dict[key]) - else: - log.warn('no {key} in class'.format(key=key)) - diff --git a/mindsdb/libs/data_types/persistent_object.py b/mindsdb/libs/data_types/persistent_object.py deleted file mode 100644 --- a/mindsdb/libs/data_types/persistent_object.py +++ /dev/null @@ -1,13 +0,0 @@ -from mindsdb.libs.data_types.object_dict import ObjectDict -from mindsdb.libs.data_types.persistent_object_mongo import PersistentObjectMongo -from mindsdb.libs.data_types.persistent_object_tinydb import PersistentObjectTinydb - -from pymongo import MongoClient -from mindsdb.config import CONFIG -from bson.objectid import ObjectId - -if CONFIG.STORE_INFO_IN_MONGODB: - PersistentObject = PersistentObjectMongo -else: - PersistentObject = PersistentObjectTinydb - diff --git a/mindsdb/libs/data_types/persistent_object_mongo.py b/mindsdb/libs/data_types/persistent_object_mongo.py deleted file mode 100644 --- a/mindsdb/libs/data_types/persistent_object_mongo.py +++ /dev/null @@ -1,98 +0,0 @@ -from mindsdb.libs.data_types.object_dict import ObjectDict -from pymongo import MongoClient -from mindsdb.config import CONFIG -from bson.objectid import ObjectId -from 
mindsdb.libs.data_types.mindsdb_logger import log - -class PersistentObjectMongo(ObjectDict): - - _entity_name = 'generic' - _pkey = [] - - def __init__(self): - - self._mongo = MongoClient(CONFIG.MONGO_SERVER_HOST) - self._collection = self._mongo.mindsdb[self._entity_name] - self.setup() - - - def setup(self): - - pass - - - def insert(self): - - dict = self.getAsDict() - dict["_id"] = str(ObjectId()) - self._collection.insert(dict) - - - def getPkey(self): - - return {key:self.__dict__[key] for key in self._pkey} - - - def update(self): - - vals = {key:self.__getattribute__(key) for key in self.getAsDict() if self.__getattribute__(key) != None} - self._collection.update_one( - self.getPkey(), - {'$set': vals} - ) - - def push(self, vals): - - self._collection.update_one( - self.getPkey(), - {'$push': vals} - ) - - - def delete(self): - orig_pkey = self.getPkey() - pkey = {key:orig_pkey[key] for key in orig_pkey if orig_pkey[key] != None} - self._collection.delete_many(pkey) - - - def find_one(self, p_key_data): - resp = self._collection.find_one(p_key_data) - class_object = self.__class__() - if resp is None: - return None - - for var_name in resp: - if hasattr(class_object, var_name): - setattr(class_object,var_name, resp[var_name]) - - return class_object - - - def find(self, conditions, order_by= None, limit = None): - resp = self._collection.find(conditions) - if order_by is not None: - resp = resp.sort(order_by) - - if resp is None: - return [] - - if limit is not None: - if hasattr(resp, 'limit'): - resp = resp.limit(limit) - else: - log.info('This driver supports no limit on query') - - ret = [] - - if resp is None: - return [] - - for item in resp: - class_object = self.__class__() - for var_name in item: - if hasattr(class_object, var_name): - setattr(class_object,var_name, item[var_name]) - - ret.append(class_object) - - return ret diff --git a/mindsdb/libs/data_types/persistent_object_tinydb.py b/mindsdb/libs/data_types/persistent_object_tinydb.py deleted file mode 100644 --- a/mindsdb/libs/data_types/persistent_object_tinydb.py +++ /dev/null @@ -1,49 +0,0 @@ -from mindsdb.libs.data_types.persistent_object_mongo import PersistentObjectMongo -from tinymongo import TinyMongoClient -from mindsdb.libs.data_types.mindsdb_logger import log - -from mindsdb.config import CONFIG -import shutil - - -class PersistentObjectTinydb(PersistentObjectMongo): - - _entity_name = 'generic' - _pkey = [] - - def __init__(self): - self._mongo = TinyMongoClient(CONFIG.LOCALSTORE_PATH) - try: - self._collection = self._mongo.mindsdb[self._entity_name] - except: - log.error('No collection will be found, db corrupted, truncating it') - shutil.rmtree(CONFIG.LOCALSTORE_PATH) - raise ValueError('MindsDB local document store corruped. 
No other way to put this, trained model data will be lost') - - self.setup() - - - - - def push(self, vals): - """ - Tinymongo does not support push, so here we have it - :param vals: - :return: - """ - obj_dict = self._collection.find_one(self.getPkey()) - - new_vals = {} - for key in vals: - if key not in obj_dict: - obj_dict[key] = [] - if type(vals[key]) == type([]): - new_vals[key] = obj_dict[key] + vals[key] - else: - new_vals[key] = obj_dict[key] + [vals[key]] - - - self._collection.update_one( - self.getPkey(), - {'$set': vals} - ) diff --git a/mindsdb/libs/data_types/sampler.py b/mindsdb/libs/data_types/sampler.py deleted file mode 100644 --- a/mindsdb/libs/data_types/sampler.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -******************************************************* - * Copyright (C) 2017 MindsDB Inc. <[email protected]> - * - * This file is part of MindsDB Server. - * - * MindsDB Server can not be copied and/or distributed without the express - * permission of MindsDB Inc - ******************************************************* -""" - -import time - -from mindsdb.libs.data_types.mindsdb_logger import log - -from mindsdb.config import CONFIG -from mindsdb.libs.constants.mindsdb import * -from mindsdb.libs.data_types.batch import Batch -from mindsdb.libs.data_entities.persistent_model_metadata import PersistentModelMetadata - - -# this implements sampling without replacement and encodes sequential data using encoders -# as described here, its best to sample without replacement: -# https://stats.stackexchange.com/questions/235844/should-training-samples-randomly-drawn-for-mini-batch-training-neural-nets-be-dr - -class Sampler: - - def __init__(self, data, metadata_as_stored, batch_size = CONFIG.SAMPLER_MAX_BATCH_SIZE, ignore_types = [], blank_columns=[]): - """ - - :param data: - :param metadata_as_stored: - :type metadata_as_stored: PersistentModelMetadata - :param batch_size: - :param ignore_types: - """ - self.data = data - - self.meta_data = metadata_as_stored - self.stats = metadata_as_stored.column_stats - self.model_columns = [col for col in metadata_as_stored.columns if self.stats[col]['data_type'] not in ignore_types] - self.ignore_columns_with_type = ignore_types - - self.batch_size = batch_size - self.variable_wrapper = None - self.variable_unwrapper = None - self.blank_columns = blank_columns - - def getSampleBatch(self): - """ - Get one single sample batch - :return: - """ - for batch in self: - return batch - - def __iter__(self): - """ - - :return: - """ - - # here we will also determine based on the query if we should do a moving window for the training - # TODO: if order by encode - ret = {} - - total_groups = len(self.data) - for group in self.data: - - group_pointer = 0 - first_column = next(iter(self.data[group])) - total_length = len(self.data[group][first_column]) - log.debug('Iterator on group {group}/{total_groups}, total rows: {total_rows}'.format(group=group, total_groups=total_groups, total_rows=total_length)) - - while group_pointer < total_length: - limit = group_pointer + self.batch_size - limit = limit if limit < total_length else total_length - - allcols_time = time.time() - - for column in self.model_columns: - - # log.debug('Generating: pytorch variables, batch: {column}-[{group_pointer}:{limit}]-{column_type}'.format(column=column, group_pointer=group_pointer, limit=limit, column_type=self.stats[column]['data_type'])) - # col_start_time = time.time() - #if self.stats[column]['data_type'] != DATA_TYPES.FULL_TEXT: - ret[column] = 
self.data[group][column][group_pointer:limit] - - ext_col_name = EXTENSION_COLUMNS_TEMPLATE.format(column_name=column) - if ext_col_name in self.data[group]: - ret[ext_col_name] = self.data[group][ext_col_name][group_pointer:limit] - # else: - # # Todo: figure out how to deal with full text features here - # ret[column] =[0]*(limit-group_pointer) - - # log.debug('Generated: {column} [OK] in {time_delta:.2f} seconds'.format(column=column, time_delta=(time.time()-col_start_time))) - - log.debug('Generated: [ALL_COLUMNS] in batch [OK], {time_delta:.2f} seconds'.format(time_delta=(time.time() - allcols_time))) - - yield Batch(self, ret, group=group, column=column, start=group_pointer, end=limit, blank_columns=self.blank_columns) - - ret = {} - group_pointer = limit diff --git a/mindsdb/libs/data_types/tester_response.py b/mindsdb/libs/data_types/tester_response.py deleted file mode 100644 --- a/mindsdb/libs/data_types/tester_response.py +++ /dev/null @@ -1,9 +0,0 @@ - -class TesterResponse(): - - def __init__(self, error=0, accuracy =0 , predicted_targets={}, real_targets={}): - self.error = error - self.accuracy = accuracy - self.predicted_targets = predicted_targets - self.real_targets = real_targets - diff --git a/mindsdb/libs/data_types/trainer_response.py b/mindsdb/libs/data_types/trainer_response.py deleted file mode 100644 --- a/mindsdb/libs/data_types/trainer_response.py +++ /dev/null @@ -1,9 +0,0 @@ - -class TrainerResponse(): - - def __init__(self, model, epoch=0, batch=0, loss=0): - self.model = model - self.epoch = epoch - self.batch = batch - self.loss = loss - diff --git a/mindsdb/libs/data_types/transaction_output_data.py b/mindsdb/libs/data_types/transaction_output_data.py --- a/mindsdb/libs/data_types/transaction_output_data.py +++ b/mindsdb/libs/data_types/transaction_output_data.py @@ -16,7 +16,7 @@ def __init__(self, transaction = None, data = {}, evaluations = {}, ): self.transaction = transaction def __iter__(self): - for i, cell in enumerate(self.data[self.transaction.persistent_model_metadata.columns[0]]): + for i, cell in enumerate(self.data[self.transaction.lmd.columns[0]]): yield TransactionOutputRow(self, i).as_dict() def __getitem__(self, item): @@ -26,4 +26,4 @@ def __str__(self): return str(self.data) def __len__(self): - return len(self.data[self.transaction.persistent_model_metadata.columns[0]]) + return len(self.data[self.transaction.lmd.columns[0]]) diff --git a/mindsdb/libs/data_types/transaction_output_row.py b/mindsdb/libs/data_types/transaction_output_row.py --- a/mindsdb/libs/data_types/transaction_output_row.py +++ b/mindsdb/libs/data_types/transaction_output_row.py @@ -21,7 +21,7 @@ def __str__(self): def as_list(self): #Note that here we will not output the confidence columns - return [self.transaction_output.evaluations[col][self.row_index] for col in self.transaction_output.transaction.persistent_model_metadata.columns] + return [self.transaction_output.evaluations[col][self.row_index] for col in self.transaction_output.transaction.lmd.columns] @property def _predicted_values(self): diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py --- a/mindsdb/libs/phases/data_extractor/data_extractor.py +++ b/mindsdb/libs/phases/data_extractor/data_extractor.py @@ -2,12 +2,9 @@ from mindsdb.libs.constants.mindsdb import * from mindsdb.libs.phases.base_module import BaseModule from mindsdb.libs.data_types.mindsdb_logger import log -from mindsdb.libs.data_entities.persistent_model_metadata 
import PersistentModelMetadata from mindsdb.libs.helpers.text_helpers import hashtext from mindsdb.external_libs.stats import calculate_sample_size - - import random import traceback import pandas @@ -25,8 +22,8 @@ def _get_data_frame_from_when_conditions(self, train_metadata): :return: """ - columns = self.transaction.persistent_model_metadata.columns - when_conditions = self.transaction.persistent_model_metadata.model_when_conditions + columns = self.transaction.lmd.columns + when_conditions = self.transaction.lmd.model_when_conditions when_conditions_list = [] # here we want to make a list of the type ( ValueForField1, ValueForField2,..., ValueForFieldN ), ... @@ -62,7 +59,7 @@ def _apply_sort_conditions_to_df(self, df, train_metadata): asc_values = [True for i in train_metadata.model_group_by] + asc_values df = df.sort_values(sort_by, ascending=asc_values) - elif self.transaction.persistent_model_metadata.type == TRANSACTION_LEARN: + elif self.transaction.lmd.type == TRANSACTION_LEARN: # if its not a time series, randomize the input data and we are learning df = df.sample(frac=1) @@ -73,24 +70,24 @@ def _get_prepared_input_df(self, train_metadata): """ :param train_metadata: - :type train_metadata: PersistentModelMetadata + :type train_metadata: LightModelMetadata :return: """ df = None # if transaction metadata comes with some data as from_data create the data frame - if self.transaction.persistent_model_metadata.from_data is not None: + if self.transaction.lmd.from_data is not None: # make sure we build a dataframe that has all the columns we need - df = self.transaction.persistent_model_metadata.from_data + df = self.transaction.lmd.from_data df = df.where((pandas.notnull(df)), None) # if this is a predict statement, create use model_when_conditions to shape the dataframe - if self.transaction.persistent_model_metadata.type == TRANSACTION_PREDICT: - if self.transaction.persistent_model_metadata.when_data is not None: - df = self.transaction.persistent_model_metadata.when_data + if self.transaction.lmd.type == TRANSACTION_PREDICT: + if self.transaction.lmd.when_data is not None: + df = self.transaction.lmd.when_data df = df.where((pandas.notnull(df)), None) - elif self.transaction.persistent_model_metadata.model_when_conditions is not None: + elif self.transaction.lmd.model_when_conditions is not None: # if no data frame yet, make one df = self._get_data_frame_from_when_conditions(train_metadata) @@ -127,9 +124,11 @@ def _validate_input_data_integrity(self): # make sure that the column we are trying to predict is on the input_data # else fail, because we cannot predict data we dont have - if self.transaction.persistent_model_metadata.model_is_time_series or self.transaction.persistent_model_metadata.type == TRANSACTION_LEARN: + #if self.transaction.lmd.model_is_time_series or self.transaction.lmd.type == TRANSACTION_LEARN: + # ^ How did this even make sense before ? Why did it not crash tests ? 
Pressumably because the predict col was loaded into `input_data` as an empty col - for col_target in self.transaction.persistent_model_metadata.predict_columns: + if self.transaction.lmd.type == TRANSACTION_LEARN: + for col_target in self.transaction.lmd.predict_columns: if col_target not in self.transaction.input_data.columns: err = 'Trying to predict column {column} but column not in source data'.format(column=col_target) self.log.error(err) @@ -140,7 +139,7 @@ def _validate_input_data_integrity(self): def run(self): - result = self._get_prepared_input_df(self.transaction.persistent_model_metadata) + result = self._get_prepared_input_df(self.transaction.lmd) columns = list(result.columns.values) data_array = list(result.values.tolist()) @@ -150,8 +149,8 @@ def run(self): self._validate_input_data_integrity() - is_time_series = self.transaction.persistent_model_metadata.model_is_time_series - group_by = self.transaction.persistent_model_metadata.model_group_by + is_time_series = self.transaction.lmd.model_is_time_series + group_by = self.transaction.lmd.model_group_by # create a list of the column numbers (indexes) that make the group by, this is so that we can greate group by hashes for each row if len(group_by)>0: @@ -180,10 +179,10 @@ def run(self): continue length = len(self.transaction.input_data.all_indexes[key]) - if self.transaction.persistent_model_metadata.type == TRANSACTION_LEARN: + if self.transaction.lmd.type == TRANSACTION_LEARN: sample_size = int(calculate_sample_size(population_size=length, - margin_error=self.transaction.persistent_model_metadata.sample_margin_of_error, - confidence_level=self.transaction.persistent_model_metadata.sample_confidence_level)) + margin_error=self.transaction.lmd.sample_margin_of_error, + confidence_level=self.transaction.lmd.sample_confidence_level)) # this evals True if it should send the entire group data into test, train or validation as opposed to breaking the group into the subsets should_split_by_group = type(group_by) == list and len(group_by) > 0 @@ -208,7 +207,7 @@ def run(self): self.transaction.input_data.validation_indexes[key] = self.transaction.input_data.all_indexes[key][validation_window[0]:validation_window[1]] # log some stats - if self.transaction.persistent_model_metadata.type == TRANSACTION_LEARN: + if self.transaction.lmd.type == TRANSACTION_LEARN: total_rows_used_by_subset = {'train': 0, 'test': 0, 'validation': 0} average_number_of_rows_used_per_groupby = {'train': 0, 'test': 0, 'validation': 0} @@ -226,12 +225,12 @@ def run(self): total_number_of_groupby_groups = len(self.transaction.input_data.all_indexes) if total_rows_used != total_rows_in_input: - self.log.info('You requested to sample with a *margin of error* of {sample_margin_of_error} and a *confidence level* of {sample_confidence_level}. Therefore:'.format(sample_confidence_level=self.transaction.persistent_model_metadata.sample_confidence_level, sample_margin_of_error= self.transaction.persistent_model_metadata.sample_margin_of_error)) + self.log.info('You requested to sample with a *margin of error* of {sample_margin_of_error} and a *confidence level* of {sample_confidence_level}. 
Therefore:'.format(sample_confidence_level=self.transaction.lmd.sample_confidence_level, sample_margin_of_error= self.transaction.lmd.sample_margin_of_error)) self.log.info('Using a [Cochran\'s sample size calculator](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/find-sample-size/) we got the following sample sizes:') data = { 'total': [total_rows_in_input, 'Total number of rows in input'], 'subsets': [[total_rows_used, 'Total number of rows used']], - 'label': 'Sample size for margin of error of ({sample_margin_of_error}) and a confidence level of ({sample_confidence_level})'.format(sample_confidence_level=self.transaction.persistent_model_metadata.sample_confidence_level, sample_margin_of_error= self.transaction.persistent_model_metadata.sample_margin_of_error) + 'label': 'Sample size for margin of error of ({sample_margin_of_error}) and a confidence level of ({sample_confidence_level})'.format(sample_confidence_level=self.transaction.lmd.sample_confidence_level, sample_margin_of_error= self.transaction.lmd.sample_margin_of_error) } self.log.infoChart(data, type='pie') diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -1,7 +1,6 @@ -from mindsdb.libs.helpers.general_helpers import pickle_obj, unpickle_obj +from mindsdb.libs.helpers.general_helpers import pickle_obj from mindsdb.libs.constants.mindsdb import * from mindsdb.libs.phases.base_module import BaseModule -from mindsdb.libs.data_types.sampler import Sampler from mindsdb.libs.model_examination.probabilistic_validator import ProbabilisticValidator from mindsdb.libs.model_examination.column_evaluator import ColumnEvaluator @@ -17,12 +16,12 @@ def run(self): # Runs the model on the validation set in order to fit a probabilistic model that will evaluate the accuracy of future predictions """ - output_columns = self.transaction.persistent_model_metadata.predict_columns - input_columns = [col for col in self.transaction.persistent_model_metadata.columns if col not in output_columns] + output_columns = self.transaction.lmd.predict_columns + input_columns = [col for col in self.transaction.lmd.columns if col not in output_columns] validation_dataset = {} for row_ind in self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY]: - for col_ind, col in enumerate(self.transaction.persistent_model_metadata.columns): + for col_ind, col in enumerate(self.transaction.lmd.columns): if col not in validation_dataset: validation_dataset[col] = [] validation_dataset[col].append(self.transaction.input_data.data_array[row_ind][col_ind]) @@ -30,19 +29,19 @@ def run(self): # Test some hypotheses about our columns column_evaluator = ColumnEvaluator() column_importances = column_evaluator.get_column_importance(model=self.transaction.model_backend, output_columns=output_columns, input_columns=input_columns, - full_dataset=validation_dataset, stats=self.transaction.persistent_model_metadata.column_stats) + full_dataset=validation_dataset, stats=self.transaction.lmd.column_stats) - self.transaction.persistent_model_metadata.column_importances = column_importances + self.transaction.lmd.column_importances = column_importances # Create the probabilistic validators for each of the predict column probabilistic_validators = {} for col in output_columns: - if 'percentage_buckets' in 
self.transaction.persistent_model_metadata.column_stats[col]: + if 'percentage_buckets' in self.transaction.lmd.column_stats[col]: probabilistic_validators[col] = ProbabilisticValidator( - col_stats=self.transaction.persistent_model_metadata.column_stats[col]) + col_stats=self.transaction.lmd.column_stats[col]) else: probabilistic_validators[col] = ProbabilisticValidator( - col_stats=self.transaction.persistent_model_metadata.column_stats[col]) + col_stats=self.transaction.lmd.column_stats[col]) # Run on the validation set multiple times, each time with one of the column blanked out for column_name in input_columns: @@ -66,11 +65,9 @@ def run(self): probabilistic_validators[pcol].partial_fit() # Pickle for later use - self.transaction.persistent_model_metadata.probabilistic_validators = {} + self.transaction.hmd.probabilistic_validators = {} for col in probabilistic_validators: - self.transaction.persistent_model_metadata.probabilistic_validators[col] = pickle_obj(probabilistic_validators[col]) - - self.transaction.persistent_model_metadata.update() + self.transaction.hmd.probabilistic_validators[col] = pickle_obj(probabilistic_validators[col]) def test(): from mindsdb.libs.controllers.predictor import Predictor diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -849,13 +849,11 @@ def run(self): validation_rows = len(self.transaction.input_data.validation_indexes) train_rows = len(self.transaction.input_data.train_indexes) - self.transaction.persistent_model_metadata.column_stats = stats - self.transaction.persistent_model_metadata.total_row_count = total_rows - self.transaction.persistent_model_metadata.test_row_count = test_rows - self.transaction.persistent_model_metadata.train_row_count = train_rows - self.transaction.persistent_model_metadata.validation_row_count = validation_rows - - self.transaction.persistent_model_metadata.update() + self.transaction.lmd.column_stats = stats + self.transaction.lmd.total_row_count = total_rows + self.transaction.lmd.test_row_count = test_rows + self.transaction.lmd.train_row_count = train_rows + self.transaction.lmd.validation_row_count = validation_rows self._log_interesting_stats(stats) return stats
diff --git a/integration_testing/run_tests.py b/integration_testing/run_tests.py --- a/integration_testing/run_tests.py +++ b/integration_testing/run_tests.py @@ -342,7 +342,7 @@ def test_multilabel_prediction(): except: print(traceback.format_exc()) logger.error(f'Failed to create mindsdb Predictor') - exit(1) + exit(1)0 try: results = mdb.predict(when_data=test_file_name)
Change persistent data
A few changes should be made to the shared transaction state:

* Light persistent metadata (similar to the current persistent_transaction_metadata). Easy to save and transfer to a server (in order to generate pretty-looking logs and aggregate statistics about various models). Ideally, this shouldn't go above 10-100MB or so.
* Heavy persistent metadata. This would include things like the Probabilistic Validator, or the weights of the ML model itself (unless the backend saves them separately).
* Non-persistent metadata. This would include what's currently in the transaction metadata, but with less duplication (currently there's a lot of duplication between the persistent and non-persistent metadata).

Changes will also be made to how this state is saved:

* The two persistent states will be pickled throughout training and prediction and saved to files in mindsdb's storage directory (a sketch of this flow follows below). This should also deal with the concurrency issues outlined in #13.
* We'll also have some helper functions to move this pickled metadata into various databases (mongo, mariadb, postgres, etc.), in case users want to use a database instead of a directory for storage. The metadata will be treated as opaque binary blobs (unlike now, when the data in mongo is actual BSON): simply unpickled when read and overwritten when inserted/updated.
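For illustration, here is a minimal sketch of the save/load flow described above. The storage path, the helper names, and the use of sqlite3 as the example blob store are all assumptions made for this sketch; the actual patch pickles the `lmd`/`hmd` dicts under `CONFIG.MINDSDB_STORAGE_PATH` using a `<name>_light_model_metadata.pickle` / `<name>_heavy_model_metadata.pickle` naming scheme.

```python
# Minimal sketch of the proposed persistence flow. Assumptions: STORAGE_PATH
# stands in for CONFIG.MINDSDB_STORAGE_PATH, and sqlite3 stands in for
# whichever database (mongo, mariadb, postgres, ...) a user might pick.
import os
import pickle
import sqlite3

STORAGE_PATH = '/tmp/mindsdb_storage'  # assumption: placeholder storage directory


def save_metadata(model_name, lmd, hmd):
    # Pickle the light and heavy metadata to separate files, mirroring the
    # file-naming convention used in the patch below.
    os.makedirs(STORAGE_PATH, exist_ok=True)
    with open(os.path.join(STORAGE_PATH, f'{model_name}_light_model_metadata.pickle'), 'wb') as fp:
        pickle.dump(lmd, fp)
    with open(os.path.join(STORAGE_PATH, f'{model_name}_heavy_model_metadata.pickle'), 'wb') as fp:
        pickle.dump(hmd, fp)


def load_metadata(model_name):
    # Read both persistent states back; callers can then overlay any
    # non-persistent, per-transaction values on top.
    with open(os.path.join(STORAGE_PATH, f'{model_name}_light_model_metadata.pickle'), 'rb') as fp:
        lmd = pickle.load(fp)
    with open(os.path.join(STORAGE_PATH, f'{model_name}_heavy_model_metadata.pickle'), 'rb') as fp:
        hmd = pickle.load(fp)
    return lmd, hmd


def store_blob_in_db(db_path, model_name, metadata):
    # Hypothetical helper: move pickled metadata into a database as an opaque
    # binary blob, overwritten on insert/update and unpickled on read.
    conn = sqlite3.connect(db_path)
    conn.execute('CREATE TABLE IF NOT EXISTS model_metadata (name TEXT PRIMARY KEY, data BLOB)')
    conn.execute('INSERT OR REPLACE INTO model_metadata VALUES (?, ?)',
                 (model_name, pickle.dumps(metadata)))
    conn.commit()
    conn.close()


def read_blob_from_db(db_path, model_name):
    conn = sqlite3.connect(db_path)
    row = conn.execute('SELECT data FROM model_metadata WHERE name = ?',
                       (model_name,)).fetchone()
    conn.close()
    return pickle.loads(row[0]) if row else None
```

Because the blob is opaque to the database, the same pickle/unpickle helpers would work unchanged against a mongo or postgres driver; only the two storage calls differ.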
2019-04-12T09:52:16Z
[]
[]
mindsdb/mindsdb
159
mindsdb__mindsdb-159
[ "24" ]
da4fff1970a4cdc8406791f8b0e66f02a22eecd3
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -41,7 +41,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co other_col_names = [] for feature_def in model_definition['input_features']: - if feature_def['name'] not in self.transaction.lmd.model_group_by and feature_def['name'] not in previous_predict_col_names: + if feature_def['name'] not in self.transaction.lmd['model_group_by'] and feature_def['name'] not in previous_predict_col_names: feature_def['type'] = 'sequence' if feature_def['name'] not in timeseries_cols: other_col_names.append(feature_def['name']) @@ -50,13 +50,13 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co previous_predict_col_names.append(previous_predict_col_name) new_cols = {} - for col in [*other_col_names,*previous_predict_col_names,timeseries_col_name,*predict_col_names,*self.transaction.lmd.model_group_by]: + for col in [*other_col_names,*previous_predict_col_names,timeseries_col_name,*predict_col_names,*self.transaction.lmd['model_group_by']]: new_cols[col] = [] nr_ele = len(df[timeseries_col_name]) - if self.transaction.lmd.window_size_seconds is not None: - window_size_seconds = self.transaction.lmd.window_size_seconds + if self.transaction.lmd['window_size_seconds'] is not None: + window_size_seconds = self.transaction.lmd['window_size_seconds'] i = 0 while i < nr_ele: current_window = 0 @@ -70,7 +70,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[col] = [] for col in predict_col_names: new_row[col] = df[col][i] - for col in self.transaction.lmd.model_group_by: + for col in self.transaction.lmd['model_group_by']: new_row[col] = df[col][i] inverted_index_range = list(range(i)) @@ -101,11 +101,11 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[timeseries_col_name] = timeseries_row for col in new_row: - if col not in predict_col_names and col not in self.transaction.lmd.model_group_by: + if col not in predict_col_names and col not in self.transaction.lmd['model_group_by']: new_row[col].reverse() new_cols[col].append(new_row[col]) else: - window_size_samples = self.transaction.lmd.window_size_samples + window_size_samples = self.transaction.lmd['window_size_samples'] i = 0 while i < nr_ele: new_row = {} @@ -118,7 +118,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[col] = [] for col in predict_col_names: new_row[col] = df[col][i] - for col in self.transaction.lmd.model_group_by: + for col in self.transaction.lmd['model_group_by']: new_row[col] = df[col][i] inverted_index_range = list(range(i)) @@ -148,7 +148,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co new_row[timeseries_col_name] = timeseries_row for col in new_row: - if col not in predict_col_names and col not in self.transaction.lmd.model_group_by: + if col not in predict_col_names and col not in self.transaction.lmd['model_group_by']: new_row[col].reverse() new_cols[col].append(new_row[col]) @@ -159,32 +159,32 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co def _create_ludwig_dataframe(self, mode): if mode == 'train': indexes = self.transaction.input_data.train_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns)] + columns = [[col, col_ind] for col_ind, col in 
enumerate(self.transaction.lmd['columns'])] elif mode == 'predict': indexes = self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns) if col not in self.transaction.lmd.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd['columns']) if col not in self.transaction.lmd['predict_columns']] elif mode == 'validate': indexes = self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns)] #if col not in self.transaction.lmd.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd['columns'])] #if col not in self.transaction.lmd['predict_columns']] elif mode == 'test': indexes = self.transaction.input_data.test_indexes[KEY_NO_GROUP_BY] - columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd.columns)] #if col not in self.transaction.lmd.predict_columns] + columns = [[col, col_ind] for col_ind, col in enumerate(self.transaction.lmd['columns'])] #if col not in self.transaction.lmd['predict_columns']] else: raise Exception(f'Unknown mode specified: "{mode}"') model_definition = {'input_features': [], 'output_features': []} data = {} - if self.transaction.lmd.model_order_by is None: + if self.transaction.lmd['model_order_by'] is None: timeseries_cols = [] else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd.model_order_by)) + timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd['model_order_by'])) for ele in columns: col = ele[0] col_ind = ele[1] data[col] = [] - col_stats = self.transaction.lmd.column_stats[col] + col_stats = self.transaction.lmd['column_stats'][col] data_subtype = col_stats['data_subtype'] ludwig_dtype = None @@ -211,7 +211,7 @@ def _create_ludwig_dataframe(self, mode): ludwig_dtype = 'category' elif data_subtype in (DATA_SUBTYPES.DATE): - if col not in self.transaction.lmd.predict_columns: + if col not in self.transaction.lmd['predict_columns']: ludwig_dtype = 'date' else: ludwig_dtype = 'category' @@ -252,7 +252,7 @@ def _create_ludwig_dataframe(self, mode): elif ludwig_dtype == 'sequence': arr_str = self.transaction.input_data.data_array[row_ind][col_ind] - arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.lmd.column_stats[col]['separator']))) + arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.lmd['column_stats'][col]['separator']))) data[col].append(arr) # Date isn't supported yet, so we hack around it @@ -306,7 +306,7 @@ def _create_ludwig_dataframe(self, mode): if custom_logic_continue: continue - if col not in self.transaction.lmd.predict_columns: + if col not in self.transaction.lmd['predict_columns']: input_def = { 'name': col ,'type': ludwig_dtype @@ -348,10 +348,10 @@ def _create_ludwig_dataframe(self, mode): def train(self): training_dataframe, model_definition = self._create_ludwig_dataframe('train') - if self.transaction.lmd.model_order_by is None: + if self.transaction.lmd['model_order_by'] is None: timeseries_cols = [] else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd.model_order_by)) + timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd['model_order_by'])) if len(timeseries_cols) > 0: training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') @@ -360,27 +360,27 @@ def train(self): model = 
LudwigModel(model_definition) # Figure out how to pass `model_load_path` - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd.model_name) + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name']) #model.model.weights_save_path.rstrip('/model_weights_progress') + '/model' - ludwig_model_savepath = Config.LOCALSTORE_PATH.rstrip('local_jsondb_store') + self.transaction.lmd.model_name + ludwig_model_savepath = Config.LOCALSTORE_PATH.rstrip('local_jsondb_store') + self.transaction.lmd['name'] model.save(ludwig_model_savepath) model.close() - self.transaction.lmd.ludwig_data = {'ludwig_save_path': ludwig_model_savepath, 'model_definition': model_definition} - + self.transaction.lmd['ludwig_data'] = {'ludwig_save_path': ludwig_model_savepath} + self.transaction.hmd['ludwig_data'] = {'model_definition': model_definition} def predict(self, mode='predict', ignore_columns=[]): predict_dataframe, model_definition = self._create_ludwig_dataframe(mode) - model_definition = self.transaction.lmd.ludwig_data['model_definition'] + model_definition = self.transaction.hmd['ludwig_data']['model_definition'] - model = LudwigModel.load(self.transaction.lmd.ludwig_data['ludwig_save_path']) + model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - if self.transaction.lmd.model_order_by is None: + if self.transaction.lmd['model_order_by'] is None: timeseries_cols = [] else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd.model_order_by)) + timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd['model_order_by'])) if len(timeseries_cols) > 0: predict_dataframe, model_definition = self._translate_df_to_timeseries_format(predict_dataframe, model_definition, timeseries_cols) @@ -393,7 +393,7 @@ def predict(self, mode='predict', ignore_columns=[]): predict_dataframe[ignore_col + date_appendage] = [None] * len(predict_dataframe[ignore_col + date_appendage]) with disable_ludwig_output(): - model = LudwigModel.load(self.transaction.lmd.ludwig_data['ludwig_save_path']) + model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) predictions = model.predict(data_df=predict_dataframe) for col_name in predictions: diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -3,15 +3,14 @@ import _thread import uuid import traceback - +import pickle from mindsdb.libs.data_types.mindsdb_logger import MindsdbLogger from mindsdb.libs.helpers.multi_data_source import getDS from mindsdb.libs.helpers.general_helpers import check_for_updates +from mindsdb.__about__ import __version__ from mindsdb.config import CONFIG -from mindsdb.libs.data_types.light_model_metadata import LightModelMetadata -from mindsdb.libs.data_types.heavy_model_metadata import HeavyModelMetadata from mindsdb.libs.controllers.transaction import Transaction from mindsdb.libs.constants.mindsdb import * @@ -76,6 +75,33 @@ def export(self, model_zip_file='mindsdb_storage'): except: return False + def get_models(self): + models = [] + for fn in os.listdir(CONFIG.MINDSDB_STORAGE_PATH): + if '_light_model_metadata.pickle' in fn: + model_name = fn.replace('_light_model_metadata.pickle','') + lmd = self.get_model_data(model_name) + for k in ['name', 'version', 'is_active', 'data_source', 'predict', 'accuracy', + 'status', 'train_end_at', 'updated_at', 'created_at']: + model = {} + if k == 'predict': + 
model[k] = lmd['predict_columns'] + elif k in lmd: + model[k] = lmd[k] + else: + model[k] = None + print(f'Key {k} not found in the light model metadata !') + models.append(model) + return models + + def get_model_data(self, model_name): + with open(CONFIG.MINDSDB_STORAGE_PATH + f'/{model_name}_light_model_metadata.pickle', 'rb') as fp: + light_metadata = pickle.load(fp) + # ADAPTOR CODE + adapted_light_metadata = {} + # ADAPTOR CODE + adapted_light_metadata = light_metadata #temporary + return adapted_light_metadata def load(self, model_zip_file='mindsdb_storage.zip'): """ @@ -155,28 +181,32 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No else: self.log.warning('Note that after version 1.0, the default value for argument rename_strange_columns in MindsDB().learn, will be flipped from True to False, this means that if your data has columns with special characters, MindsDB will not try to rename them by default.') - transaction_metadata = LightModelMetadata() - heavy_transaction_metadata = HeavyModelMetadata() - transaction_metadata.model_name = self.name - heavy_transaction_metadata.model_name = self.name - transaction_metadata.model_backend = backend - transaction_metadata.predict_columns = predict_columns - transaction_metadata.model_columns_map = {} if rename_strange_columns else from_ds._col_map - transaction_metadata.model_group_by = group_by - transaction_metadata.model_order_by = order_by - transaction_metadata.window_size_samples = window_size_samples - transaction_metadata.window_size_seconds = window_size_seconds - transaction_metadata.model_is_time_series = is_time_series - transaction_metadata.type = transaction_type - transaction_metadata.from_data = from_ds - transaction_metadata.test_from_data = test_from_ds - transaction_metadata.ignore_columns = ignore_columns - transaction_metadata.sample_margin_of_error = sample_margin_of_error - transaction_metadata.sample_confidence_level = sample_confidence_level - transaction_metadata.stop_training_in_x_seconds = stop_training_in_x_seconds - transaction_metadata.stop_training_in_accuracy = stop_training_in_accuracy - - Transaction(session=self, transaction_metadata=transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log, breakpoint=breakpoint) + heavy_transaction_metadata = {} + heavy_transaction_metadata['name'] = self.name + heavy_transaction_metadata['from_data'] = from_ds + heavy_transaction_metadata['test_from_data'] = test_from_ds + + light_transaction_metadata = {} + light_transaction_metadata['version'] = str(__version__) + light_transaction_metadata['name'] = self.name + light_transaction_metadata['data_preparation'] = {} + light_transaction_metadata['model_backend'] = backend + light_transaction_metadata['predict_columns'] = predict_columns + light_transaction_metadata['model_columns_map'] = {} if rename_strange_columns else from_ds._col_map + light_transaction_metadata['model_group_by'] = group_by + light_transaction_metadata['model_order_by'] = order_by + light_transaction_metadata['window_size_samples'] = window_size_samples + light_transaction_metadata['window_size_seconds'] = window_size_seconds + light_transaction_metadata['model_is_time_series'] = is_time_series + light_transaction_metadata['data_source'] = from_data + light_transaction_metadata['type'] = transaction_type + light_transaction_metadata['ignore_columns'] = ignore_columns + light_transaction_metadata['sample_margin_of_error'] = sample_margin_of_error + 
light_transaction_metadata['sample_confidence_level'] = sample_confidence_level + light_transaction_metadata['stop_training_in_x_seconds'] = stop_training_in_x_seconds + light_transaction_metadata['stop_training_in_accuracy'] = stop_training_in_accuracy + + Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log, breakpoint=breakpoint) def predict(self, when={}, when_data = None, update_cached_model = False): @@ -194,21 +224,24 @@ def predict(self, when={}, when_data = None, update_cached_model = False): breakpoint = CONFIG.DEBUG_BREAK_POINT when_ds = None if when_data is None else getDS(when_data) - transaction_metadata = LightModelMetadata() - heavy_transaction_metadata = HeavyModelMetadata() - transaction_metadata.model_name = self.name - heavy_transaction_metadata.model_name = self.name + heavy_transaction_metadata = {} + + heavy_transaction_metadata['name'] = self.name if update_cached_model: self.predict_worker = None # lets turn into lists: when when = [when] if type(when) in [type(None), type({})] else when + heavy_transaction_metadata['when_data'] = when_ds + + light_transaction_metadata = {} - transaction_metadata.model_when_conditions = when - transaction_metadata.type = transaction_type - transaction_metadata.when_data = when_ds + light_transaction_metadata['name'] = self.name + light_transaction_metadata['model_when_conditions'] = when + light_transaction_metadata['type'] = transaction_type + light_transaction_metadata['data_preparation'] = {} - transaction = Transaction(session=self, transaction_metadata=transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, breakpoint=breakpoint) + transaction = Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, breakpoint=breakpoint) return transaction.output_data diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -1,7 +1,6 @@ -from mindsdb.libs.helpers.general_helpers import pickle_obj, unpickle_obj +from mindsdb.libs.helpers.general_helpers import unpickle_obj from mindsdb.libs.constants.mindsdb import * from mindsdb.libs.helpers.general_helpers import * -from mindsdb.libs.data_types.light_model_metadata import LightModelMetadata from mindsdb.libs.data_types.transaction_data import TransactionData from mindsdb.libs.data_types.transaction_output_data import PredictTransactionOutputData, TrainTransactionOutputData from mindsdb.libs.data_types.mindsdb_logger import log @@ -15,10 +14,11 @@ import importlib import copy import pickle +import datetime class Transaction: - def __init__(self, session, transaction_metadata, heavy_transaction_metadata, logger = log, breakpoint = PHASE_END): + def __init__(self, session, light_transaction_metadata, heavy_transaction_metadata, logger = log, breakpoint = PHASE_END): """ A transaction is the interface to start some MindsDB operation within a session @@ -26,14 +26,16 @@ def __init__(self, session, transaction_metadata, heavy_transaction_metadata, lo :type session: utils.controllers.session_controller.SessionController :param transaction_type: :param transaction_metadata: - :type transaction_metadata: LightModelMetadata + :type transaction_metadata: dict + :type heavy_transaction_metadata: dict :param breakpoint: """ self.breakpoint = breakpoint self.session = session - 
self.lmd = transaction_metadata #type: LightModelMetadata + self.lmd = light_transaction_metadata + self.lmd['created_at'] = str(datetime.datetime.now()) self.hmd = heavy_transaction_metadata # variables to de defined by setup @@ -59,6 +61,7 @@ def _call_phase_module(self, module_name): :return: """ + self.lmd['is_active'] = True module_path = convert_cammelcase_to_snake_string(module_name) module_full_path = 'mindsdb.libs.phases.{module_path}.{module_path}'.format(module_path=module_path) try: @@ -71,6 +74,8 @@ def _call_phase_module(self, module_name): self.log.error(traceback.format_exc()) raise ValueError(error) return None + finally: + self.lmd['is_active'] = False def _execute_learn(self): @@ -87,34 +92,40 @@ def _execute_learn(self): try: # start populating data - self.lmd.current_phase = MODEL_STATUS_ANALYZING - self.lmd.columns = self.input_data.columns # this is populated by data extractor + self.lmd['current_phase'] = MODEL_STATUS_ANALYZING + self.lmd['columns'] = self.input_data.columns # this is populated by data extractor self._call_phase_module('StatsGenerator') - self.lmd.current_phase = MODEL_STATUS_TRAINING + self.lmd['current_phase'] = MODEL_STATUS_TRAINING - if self.lmd.model_backend == 'ludwig': + if self.lmd['model_backend'] == 'ludwig': + self.lmd['is_active'] = True self.model_backend = LudwigBackend(self) self.model_backend.train() + self.lmd['is_active'] = False - self._call_phase_module('ModelAnalyzer') - # @STARTFIX Null out some non jsonable columns, temporary - self.lmd.from_data = None - self.lmd.test_from_data = None - # @ENDFIX + self.lmd['train_end_at'] = str(datetime.datetime.now()) + + self._call_phase_module('ModelAnalyzer') - with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd.model_name + '_light_model_metadata.pickle', 'wb') as fp: + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd['name'] + '_light_model_metadata.pickle', 'wb') as fp: + self.lmd['updated_at'] = str(datetime.datetime.now()) pickle.dump(self.lmd, fp) - with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd.model_name + '_heavy_model_metadata.pickle', 'wb') as fp: + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.hmd['name'] + '_heavy_model_metadata.pickle', 'wb') as fp: + # Don't save data for now + self.hmd['from_data'] = None + self.hmd['test_from_data'] = None + # Don't save data for now pickle.dump(self.hmd, fp) return except Exception as e: - self.lmd.current_phase = MODEL_STATUS_ERROR - self.lmd.error_msg = traceback.print_exc() + self.lmd['is_active'] = False + self.lmd['current_phase'] = MODEL_STATUS_ERROR + self.lmd['error_msg'] = traceback.print_exc() self.log.error(str(e)) raise e @@ -127,7 +138,7 @@ def _execute_drop_model(self): """ - self.output_data.data_array = [['Model '+self.lmd.model_name+' deleted.']] + self.output_data.data_array = [['Model '+self.lmd['name']+' deleted.']] self.output_data.columns = ['Status'] return @@ -140,27 +151,22 @@ def _execute_predict(self): :return: """ old_lmd = {} - for k in self.lmd.__dict__.keys(): - old_lmd[k] = self.lmd.__dict__[k] + for k in self.lmd: old_lmd[k] = self.lmd[k] old_hmd = {} - for k in old_hmd: - if old_hmd[k] is not None: - self.hmd.__dict__[k] = old_hmd[k] + for k in self.hmd: old_hmd[k] = self.hmd[k] - with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd.model_name + '_light_model_metadata.pickle', 'rb') as fp: + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd['name'] + '_light_model_metadata.pickle', 'rb') as fp: self.lmd = pickle.load(fp) - with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + 
self.lmd.model_name + '_heavy_model_metadata.pickle', 'rb') as fp: + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.hmd['name'] + '_heavy_model_metadata.pickle', 'rb') as fp: self.hmd = pickle.load(fp) for k in old_lmd: - if old_lmd[k] is not None: - self.lmd.__dict__[k] = old_lmd[k] + if old_lmd[k] is not None: self.lmd[k] = old_lmd[k] for k in old_hmd: - if old_hmd[k] is not None: - self.hmd.__dict__[k] = old_hmd[k] + if old_hmd[k] is not None: self.hmd[k] = old_hmd[k] if self.lmd is None: self.log.error('No metadata found for this model') @@ -174,21 +180,21 @@ def _execute_predict(self): self.output_data = PredictTransactionOutputData(transaction=self) - if self.lmd.model_backend == 'ludwig': + if self.lmd['model_backend'] == 'ludwig': self.model_backend = LudwigBackend(self) predictions = self.model_backend.predict() - # self.transaction.lmd.predict_columns + # self.transaction.lmd['predict_columns'] self.output_data.data = {col: [] for i, col in enumerate(self.input_data.columns)} - input_columns = [col for col in self.input_data.columns if col not in self.lmd.predict_columns] + input_columns = [col for col in self.input_data.columns if col not in self.lmd['predict_columns']] for row in self.input_data.data_array: for index, cell in enumerate(row): col = self.input_data.columns[index] self.output_data.data[col].append(cell) - for predicted_col in self.lmd.predict_columns: - probabilistic_validator = unpickle_obj(self.hmd.probabilistic_validators[predicted_col]) + for predicted_col in self.lmd['predict_columns']: + probabilistic_validator = unpickle_obj(self.hmd['probabilistic_validators'][predicted_col]) predicted_values = predictions[predicted_col] self.output_data.data[predicted_col] = predicted_values @@ -203,6 +209,18 @@ def _execute_predict(self): #output_data[col][row_number] = prediction_evaluation.most_likely_value Huh, is this correct, are we replacing the predicted value with the most likely one ? Seems... 
wrong self.output_data.evaluations[predicted_col][row_number] = prediction_evaluation + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd['name'] + '_light_model_metadata.pickle', 'wb') as fp: + self.lmd['updated_at'] = str(datetime.datetime.now()) + pickle.dump(self.lmd, fp) + + with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.hmd['name'] + '_heavy_model_metadata.pickle', 'wb') as fp: + # Don't save data for now + self.hmd['from_data'] = None + self.hmd['test_from_data'] = None + # Don't save data for now + pickle.dump(self.hmd, fp) + + print(self.lmd) return @@ -212,18 +230,18 @@ def run(self): :return: """ - if self.lmd.type == TRANSACTION_BAD_QUERY: + if self.lmd['type'] == TRANSACTION_BAD_QUERY: self.log.error(self.errorMsg) self.error = True return - if self.lmd.type == TRANSACTION_DROP_MODEL: + if self.lmd['type'] == TRANSACTION_DROP_MODEL: self._execute_drop_model() return - if self.lmd.type == TRANSACTION_LEARN: - self.output_data.data_array = [['Model ' + self.lmd.model_name + ' training.']] + if self.lmd['type'] == TRANSACTION_LEARN: + self.output_data.data_array = [['Model ' + self.lmd['name'] + ' training.']] self.output_data.columns = ['Status'] if CONFIG.EXEC_LEARN_IN_THREAD == False: @@ -232,7 +250,7 @@ def run(self): _thread.start_new_thread(self._execute_learn, ()) return - elif self.lmd.type == TRANSACTION_PREDICT: + elif self.lmd['type'] == TRANSACTION_PREDICT: self._execute_predict() - elif self.lmd.type == TRANSACTION_NORMAL_SELECT: + elif self.lmd['type'] == TRANSACTION_NORMAL_SELECT: self._execute_normal_select() diff --git a/mindsdb/libs/data_types/heavy_model_metadata.py b/mindsdb/libs/data_types/heavy_model_metadata.py deleted file mode 100644 --- a/mindsdb/libs/data_types/heavy_model_metadata.py +++ /dev/null @@ -1,9 +0,0 @@ -class HeavyModelMetadata(): - - _entity_name = 'heavy_model_metadata' - _pkey = ['model_name'] - - def setup(self): - - self.probabilistic_validators = None - self.model_name = None diff --git a/mindsdb/libs/data_types/light_metadata.py b/mindsdb/libs/data_types/light_metadata.py new file mode 100644 --- /dev/null +++ b/mindsdb/libs/data_types/light_metadata.py @@ -0,0 +1,323 @@ +# UNDER CONSTRUCTION ! 
+ +light_metadata = { + "name": { + "type": "string" + }, + "version": { + "type": "string" + }, + "data_preparation": { + "type": "object", + "properties": { + "accepted_margin_of_error": { + "type": "number" + }, + "total_row_count": { + "type": "number" + }, + "used_row_count": { + "type": "number" + }, + "test_row_count": { + "type": "number" + }, + "train_row_count": { + "type": "number" + }, + "validation_row_count": { + "type": "number" + } + } + }, + "data_analysis": { + "type": "object", + "properties": { + "target_columns_metadata": { + "type": "array", + "items": { + "type": "object", + "properties": { + "column_name": { + "type": "string" + } + } + } + } + } + } +} + +scores = ['duplicates_score','empty_cells_score','data_type_distribution_score', +'similarity_score','z_test_based_outlier_score','value_distribution_score' +,'variability_score','redundancy_score','consistency_score','consistency_score','quality_score'] + +def gen_score(score_name): + return [ + score_name: { + "type": "object", + "properties": { + "score": { + "type": "number" + }, + "description": { + "type": "string" + } + } + } + ] + + + + "data_analysis": { + "target_columns_metadata": [ + { + "column_name": "string", + "importance_score": 0, + "data_type": "categorical", + "data_type_distribution": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "data_distribution": { + "data_histogram": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "clusters": [ + { + "group": "string", + "members": [ + "string" + ] + } + ], + "mean": "string" + }, + "consistency": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + }, + "completeness": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + }, + "variability": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + } + } + ], + "input_columns_metadata": [ + { + "column_name": "string", + "importance_score": 0, + "data_type": "categorical", + "data_type_distribution": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "data_distribution": { + "data_histogram": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "clusters": [ + { + "group": "string", + "members": [ + "string" + ] + } + ], + "mean": "string" + }, + "consistency": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + }, + "completeness": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + }, + "variability": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + } + } + ] + }, + "model_analysis": [ + { + "column_name": "string", + "overall_input_importance": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "train_accuracy_over_time": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "test_accuracy_over_time": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "accuracy_histogram": { + "x": [ + "string" + ], + "y": [ + 0 + ], + "x_explained": [ + [ + { + "column_name": "string", + "importance_score": 0, + "data_type": "categorical", + 
"data_type_distribution": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "data_distribution": { + "data_histogram": { + "type": "categorical", + "x": [ + "string" + ], + "y": [ + 0 + ] + }, + "clusters": [ + { + "group": "string", + "members": [ + "string" + ] + } + ], + "mean": "string" + }, + "consistency": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + }, + "completeness": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + }, + "variability": { + "score": "string", + "metrics": [ + { + "type": "error", + "score": 0, + "description": "string" + } + ], + "description": "string" + } + } + ] + ] + } + } + ] +}) diff --git a/mindsdb/libs/data_types/light_model_metadata.py b/mindsdb/libs/data_types/light_model_metadata.py deleted file mode 100644 --- a/mindsdb/libs/data_types/light_model_metadata.py +++ /dev/null @@ -1,41 +0,0 @@ -class LightModelMetadata(): - - _entity_name = 'model_metadata' - _pkey = ['model_name'] - - def setup(self): - - self.model_name = None - self.model_backend = None - self.predict_columns = None - - self.columns = None - self.current_phase = None - self.column_stats = None - self.start_time = None - self.end_time = None - self.error_msg = None - self.max_group_by_count = 0 - self.total_row_count = None - self.test_row_count = None - self.train_row_count= None - self.validation_row_count = None - - self.model_order_by = None - self.model_group_by = None - self.window_size_samples = None - self.window_size_seconds = None - - self.stop_training = False - self.kill_training = False - - self.ludwig_data = None - - self.column_importances = None - - # From transaction meadata - self.from_data = None - self.test_from_data = None - self.type = None - self.when_data = None - self.model_is_time_series = None diff --git a/mindsdb/libs/data_types/transaction_output_data.py b/mindsdb/libs/data_types/transaction_output_data.py --- a/mindsdb/libs/data_types/transaction_output_data.py +++ b/mindsdb/libs/data_types/transaction_output_data.py @@ -16,7 +16,7 @@ def __init__(self, transaction = None, data = {}, evaluations = {}, ): self.transaction = transaction def __iter__(self): - for i, cell in enumerate(self.data[self.transaction.lmd.columns[0]]): + for i, cell in enumerate(self.data[self.transaction.lmd['columns'][0]]): yield TransactionOutputRow(self, i).as_dict() def __getitem__(self, item): @@ -26,4 +26,4 @@ def __str__(self): return str(self.data) def __len__(self): - return len(self.data[self.transaction.lmd.columns[0]]) + return len(self.data[self.transaction.lmd['columns'][0]]) diff --git a/mindsdb/libs/data_types/transaction_output_row.py b/mindsdb/libs/data_types/transaction_output_row.py --- a/mindsdb/libs/data_types/transaction_output_row.py +++ b/mindsdb/libs/data_types/transaction_output_row.py @@ -21,7 +21,7 @@ def __str__(self): def as_list(self): #Note that here we will not output the confidence columns - return [self.transaction_output.evaluations[col][self.row_index] for col in self.transaction_output.transaction.lmd.columns] + return [self.transaction_output.evaluations[col][self.row_index] for col in self.transaction_output.transaction.lmd['columns']] @property def _predicted_values(self): diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py --- 
a/mindsdb/libs/phases/data_extractor/data_extractor.py +++ b/mindsdb/libs/phases/data_extractor/data_extractor.py @@ -15,15 +15,13 @@ class DataExtractor(BaseModule): phase_name = PHASE_DATA_EXTRACTOR - def _get_data_frame_from_when_conditions(self, train_metadata): + def _get_data_frame_from_when_conditions(self): """ - - :param train_metadata: :return: """ - columns = self.transaction.lmd.columns - when_conditions = self.transaction.lmd.model_when_conditions + columns = self.transaction.lmd['columns'] + when_conditions = self.transaction.lmd['model_when_conditions'] when_conditions_list = [] # here we want to make a list of the type ( ValueForField1, ValueForField2,..., ValueForFieldN ), ... @@ -41,55 +39,53 @@ def _get_data_frame_from_when_conditions(self, train_metadata): return result - def _apply_sort_conditions_to_df(self, df, train_metadata): + def _apply_sort_conditions_to_df(self, df): """ :param df: - :param train_metadata: :return: """ # apply order by (group_by, order_by) - if train_metadata.model_is_time_series: - asc_values = [order_tuple[ORDER_BY_KEYS.ASCENDING_VALUE] for order_tuple in train_metadata.model_order_by] - sort_by = [order_tuple[ORDER_BY_KEYS.COLUMN] for order_tuple in train_metadata.model_order_by] + if self.transaction.lmd['model_is_time_series']: + asc_values = [order_tuple[ORDER_BY_KEYS.ASCENDING_VALUE] for order_tuple in self.transaction.lmd['model_order_by']] + sort_by = [order_tuple[ORDER_BY_KEYS.COLUMN] for order_tuple in self.transaction.lmd['model_order_by']] - if train_metadata.model_group_by: - sort_by = train_metadata.model_group_by + sort_by - asc_values = [True for i in train_metadata.model_group_by] + asc_values + if self.transaction.lmd['model_group_by']: + sort_by = self.transaction.lmd['model_group_by'] + sort_by + asc_values = [True for i in self.transaction.lmd['model_group_by']] + asc_values df = df.sort_values(sort_by, ascending=asc_values) - elif self.transaction.lmd.type == TRANSACTION_LEARN: + elif self.transaction.lmd['type'] == TRANSACTION_LEARN: # if its not a time series, randomize the input data and we are learning df = df.sample(frac=1) return df - def _get_prepared_input_df(self, train_metadata): + def _get_prepared_input_df(self): """ - :param train_metadata: - :type train_metadata: LightModelMetadata :return: """ df = None # if transaction metadata comes with some data as from_data create the data frame - if self.transaction.lmd.from_data is not None: + if self.transaction.hmd['from_data'] is not None: # make sure we build a dataframe that has all the columns we need - df = self.transaction.lmd.from_data + df = self.transaction.hmd['from_data'] df = df.where((pandas.notnull(df)), None) # if this is a predict statement, create use model_when_conditions to shape the dataframe - if self.transaction.lmd.type == TRANSACTION_PREDICT: - if self.transaction.lmd.when_data is not None: - df = self.transaction.lmd.when_data + if self.transaction.lmd['type'] == TRANSACTION_PREDICT: + if self.transaction.hmd['when_data'] is not None: + df = self.transaction.hmd['when_data'] df = df.where((pandas.notnull(df)), None) - elif self.transaction.lmd.model_when_conditions is not None: + elif self.transaction.lmd['model_when_conditions'] is not None: + # if no data frame yet, make one - df = self._get_data_frame_from_when_conditions(train_metadata) + df = self._get_data_frame_from_when_conditions() # if by now there is no DF, throw an error @@ -99,7 +95,7 @@ def _get_prepared_input_df(self, train_metadata): raise ValueError(error) return None - df 
= self._apply_sort_conditions_to_df(df, train_metadata) + df = self._apply_sort_conditions_to_df(df) g = df.columns.to_series().groupby(df.dtypes).groups if np.dtype('<M8[ns]') in g: @@ -124,11 +120,11 @@ def _validate_input_data_integrity(self): # make sure that the column we are trying to predict is on the input_data # else fail, because we cannot predict data we dont have - #if self.transaction.lmd.model_is_time_series or self.transaction.lmd.type == TRANSACTION_LEARN: + #if self.transaction.lmd['model_is_time_series'] or self.transaction.lmd['type'] == TRANSACTION_LEARN: # ^ How did this even make sense before ? Why did it not crash tests ? Pressumably because the predict col was loaded into `input_data` as an empty col - if self.transaction.lmd.type == TRANSACTION_LEARN: - for col_target in self.transaction.lmd.predict_columns: + if self.transaction.lmd['type'] == TRANSACTION_LEARN: + for col_target in self.transaction.lmd['predict_columns']: if col_target not in self.transaction.input_data.columns: err = 'Trying to predict column {column} but column not in source data'.format(column=col_target) self.log.error(err) @@ -139,7 +135,7 @@ def _validate_input_data_integrity(self): def run(self): - result = self._get_prepared_input_df(self.transaction.lmd) + result = self._get_prepared_input_df() columns = list(result.columns.values) data_array = list(result.values.tolist()) @@ -149,8 +145,8 @@ def run(self): self._validate_input_data_integrity() - is_time_series = self.transaction.lmd.model_is_time_series - group_by = self.transaction.lmd.model_group_by + is_time_series = self.transaction.lmd['model_is_time_series'] + group_by = self.transaction.lmd['model_group_by'] # create a list of the column numbers (indexes) that make the group by, this is so that we can greate group by hashes for each row if len(group_by)>0: @@ -179,10 +175,10 @@ def run(self): continue length = len(self.transaction.input_data.all_indexes[key]) - if self.transaction.lmd.type == TRANSACTION_LEARN: + if self.transaction.lmd['type'] == TRANSACTION_LEARN: sample_size = int(calculate_sample_size(population_size=length, - margin_error=self.transaction.lmd.sample_margin_of_error, - confidence_level=self.transaction.lmd.sample_confidence_level)) + margin_error=self.transaction.lmd['sample_margin_of_error'], + confidence_level=self.transaction.lmd['sample_confidence_level'])) # this evals True if it should send the entire group data into test, train or validation as opposed to breaking the group into the subsets should_split_by_group = type(group_by) == list and len(group_by) > 0 @@ -207,7 +203,7 @@ def run(self): self.transaction.input_data.validation_indexes[key] = self.transaction.input_data.all_indexes[key][validation_window[0]:validation_window[1]] # log some stats - if self.transaction.lmd.type == TRANSACTION_LEARN: + if self.transaction.lmd['type'] == TRANSACTION_LEARN: total_rows_used_by_subset = {'train': 0, 'test': 0, 'validation': 0} average_number_of_rows_used_per_groupby = {'train': 0, 'test': 0, 'validation': 0} @@ -225,12 +221,12 @@ def run(self): total_number_of_groupby_groups = len(self.transaction.input_data.all_indexes) if total_rows_used != total_rows_in_input: - self.log.info('You requested to sample with a *margin of error* of {sample_margin_of_error} and a *confidence level* of {sample_confidence_level}. 
Therefore:'.format(sample_confidence_level=self.transaction.lmd.sample_confidence_level, sample_margin_of_error= self.transaction.lmd.sample_margin_of_error)) + self.log.info('You requested to sample with a *margin of error* of {sample_margin_of_error} and a *confidence level* of {sample_confidence_level}. Therefore:'.format(sample_confidence_level=self.transaction.lmd['sample_confidence_level'], sample_margin_of_error= self.transaction.lmd['sample_margin_of_error'])) self.log.info('Using a [Cochran\'s sample size calculator](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/find-sample-size/) we got the following sample sizes:') data = { 'total': [total_rows_in_input, 'Total number of rows in input'], 'subsets': [[total_rows_used, 'Total number of rows used']], - 'label': 'Sample size for margin of error of ({sample_margin_of_error}) and a confidence level of ({sample_confidence_level})'.format(sample_confidence_level=self.transaction.lmd.sample_confidence_level, sample_margin_of_error= self.transaction.lmd.sample_margin_of_error) + 'label': 'Sample size for margin of error of ({sample_margin_of_error}) and a confidence level of ({sample_confidence_level})'.format(sample_confidence_level=self.transaction.lmd['sample_confidence_level'], sample_margin_of_error= self.transaction.lmd['sample_margin_of_error']) } self.log.infoChart(data, type='pie') diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -16,12 +16,12 @@ def run(self): # Runs the model on the validation set in order to fit a probabilistic model that will evaluate the accuracy of future predictions """ - output_columns = self.transaction.lmd.predict_columns - input_columns = [col for col in self.transaction.lmd.columns if col not in output_columns] + output_columns = self.transaction.lmd['predict_columns'] + input_columns = [col for col in self.transaction.lmd['columns'] if col not in output_columns] validation_dataset = {} for row_ind in self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY]: - for col_ind, col in enumerate(self.transaction.lmd.columns): + for col_ind, col in enumerate(self.transaction.lmd['columns']): if col not in validation_dataset: validation_dataset[col] = [] validation_dataset[col].append(self.transaction.input_data.data_array[row_ind][col_ind]) @@ -29,19 +29,19 @@ def run(self): # Test some hypotheses about our columns column_evaluator = ColumnEvaluator() column_importances = column_evaluator.get_column_importance(model=self.transaction.model_backend, output_columns=output_columns, input_columns=input_columns, - full_dataset=validation_dataset, stats=self.transaction.lmd.column_stats) + full_dataset=validation_dataset, stats=self.transaction.lmd['column_stats']) - self.transaction.lmd.column_importances = column_importances + self.transaction.lmd['column_importances'] = column_importances # Create the probabilistic validators for each of the predict column probabilistic_validators = {} for col in output_columns: - if 'percentage_buckets' in self.transaction.lmd.column_stats[col]: + if 'percentage_buckets' in self.transaction.lmd['column_stats'][col]: probabilistic_validators[col] = ProbabilisticValidator( - col_stats=self.transaction.lmd.column_stats[col]) + col_stats=self.transaction.lmd['column_stats'][col]) else: probabilistic_validators[col] = ProbabilisticValidator( - 
diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py
--- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py
+++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py
@@ -16,12 +16,12 @@ def run(self):
         # Runs the model on the validation set in order to fit a probabilistic model that will evaluate the accuracy of future predictions
         """
-        output_columns = self.transaction.lmd.predict_columns
-        input_columns = [col for col in self.transaction.lmd.columns if col not in output_columns]
+        output_columns = self.transaction.lmd['predict_columns']
+        input_columns = [col for col in self.transaction.lmd['columns'] if col not in output_columns]
         validation_dataset = {}
         for row_ind in self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY]:
-            for col_ind, col in enumerate(self.transaction.lmd.columns):
+            for col_ind, col in enumerate(self.transaction.lmd['columns']):
                 if col not in validation_dataset:
                     validation_dataset[col] = []
                 validation_dataset[col].append(self.transaction.input_data.data_array[row_ind][col_ind])
@@ -29,19 +29,19 @@
         # Test some hypotheses about our columns
         column_evaluator = ColumnEvaluator()
         column_importances = column_evaluator.get_column_importance(model=self.transaction.model_backend, output_columns=output_columns, input_columns=input_columns,
-                                                                    full_dataset=validation_dataset, stats=self.transaction.lmd.column_stats)
+                                                                    full_dataset=validation_dataset, stats=self.transaction.lmd['column_stats'])
-        self.transaction.lmd.column_importances = column_importances
+        self.transaction.lmd['column_importances'] = column_importances
         # Create the probabilistic validators for each of the predict column
         probabilistic_validators = {}
         for col in output_columns:
-            if 'percentage_buckets' in self.transaction.lmd.column_stats[col]:
+            if 'percentage_buckets' in self.transaction.lmd['column_stats'][col]:
                 probabilistic_validators[col] = ProbabilisticValidator(
-                    col_stats=self.transaction.lmd.column_stats[col])
+                    col_stats=self.transaction.lmd['column_stats'][col])
             else:
                 probabilistic_validators[col] = ProbabilisticValidator(
-                    col_stats=self.transaction.lmd.column_stats[col])
+                    col_stats=self.transaction.lmd['column_stats'][col])
         # Run on the validation set multiple times, each time with one of the column blanked out
         for column_name in input_columns:
@@ -65,9 +65,9 @@
                     probabilistic_validators[pcol].partial_fit()
         # Pickle for later use
-        self.transaction.hmd.probabilistic_validators = {}
+        self.transaction.hmd['probabilistic_validators'] = {}
         for col in probabilistic_validators:
-            self.transaction.hmd.probabilistic_validators[col] = pickle_obj(probabilistic_validators[col])
+            self.transaction.hmd['probabilistic_validators'][col] = pickle_obj(probabilistic_validators[col])
 def test():
     from mindsdb.libs.controllers.predictor import Predictor
diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py
--- a/mindsdb/libs/phases/stats_generator/stats_generator.py
+++ b/mindsdb/libs/phases/stats_generator/stats_generator.py
@@ -849,11 +849,11 @@ def run(self):
         validation_rows = len(self.transaction.input_data.validation_indexes)
         train_rows = len(self.transaction.input_data.train_indexes)
-        self.transaction.lmd.column_stats = stats
-        self.transaction.lmd.total_row_count = total_rows
-        self.transaction.lmd.test_row_count = test_rows
-        self.transaction.lmd.train_row_count = train_rows
-        self.transaction.lmd.validation_row_count = validation_rows
+        self.transaction.lmd['column_stats'] = stats
+        self.transaction.lmd['data_preparation']['total_row_count'] = total_rows
+        self.transaction.lmd['data_preparation']['test_row_count'] = test_rows
+        self.transaction.lmd['data_preparation']['train_row_count'] = train_rows
+        self.transaction.lmd['data_preparation']['validation_row_count'] = validation_rows
         self._log_interesting_stats(stats)
         return stats
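A side note on the refactor above: switching from attribute access (lmd.column_stats) to plain dict access (lmd['column_stats']) makes the light metadata an ordinary dict, which is what the accidentally committed test.json at the end of this patch contains. That file is not strictly valid JSON, since it was written with Python reprs (note the raw datetime.datetime(...) value and the mixed quote styles); a dump along these lines would produce real JSON (a sketch, not mindsdb's own serialization code):

    import datetime
    import json

    lmd = {'name': 'test_one_label_prediction',
           'created_at': datetime.datetime(2019, 4, 16, 16, 41, 4, 500054)}
    print(json.dumps(lmd, default=str))  # default=str stringifies the datetime instead of raising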
diff --git a/integration_testing/run_tests.py b/integration_testing/run_tests.py
--- a/integration_testing/run_tests.py
+++ b/integration_testing/run_tests.py
@@ -342,7 +342,7 @@ def test_multilabel_prediction():
     except:
         print(traceback.format_exc())
        logger.error(f'Failed to create mindsdb Predictor')
-        exit(1)0
+        exit(1)
     try:
         results = mdb.predict(when_data=test_file_name)
diff --git a/integration_testing/run_travis_tests.py b/integration_testing/run_travis_tests.py
--- a/integration_testing/run_travis_tests.py
+++ b/integration_testing/run_travis_tests.py
@@ -105,6 +105,9 @@ def run_tests():
             exit(1)
         logger.info(f'--------------- Predicting ran succesfully ---------------')
+
+        models = mdb.get_models()
+        print(models)
     except:
         print(traceback.format_exc())
         logger.error(f'Failed whilst predicting')
diff --git a/test.json b/test.json
new file mode 100644
--- /dev/null
+++ b/test.json
@@ -0,0 +1 @@
+{"version": "1.0.2", "name": "test_one_label_prediction", "model_backend": "ludwig", "predict_columns": ["JcWtsFNEUi"], "model_group_by": [], "model_order_by": [], "window_size_samples": null, "window_size_seconds": null, "model_is_time_series": false, "data_source": "train_data.csv", "type": "predict", "ignore_columns": [], "sample_margin_of_error": 0.0, "sample_confidence_level": 1.0, "stop_training_in_x_seconds": null, "stop_training_in_accuracy": null, "created_at": datetime.datetime(2019, 4, 16, 16, 41, 4, 500054), 'is_active': False, 'current_phase': 'Training', ... [the remainder of this single-line machine-generated dump, consisting of per-column stats, histograms, percentage buckets, and hundreds of random categorical values, is omitted]}
'HXKiTOxpzqckxWTnvLQBzQjAWQFLXomQKxjOFyqjgNLggYaedMkPWKrpkTK', 'vYttSVsIUZmAXESYIKhzzoQBidigLkeaDPHycOKCxjNxlyxUNfnUrOozMvdqoEQsXunpFbUbIthiHPYKDEUUVOuwPXohMQ', 'tcINtURqyYZeZuFArwoaIoFEIvSBvalQaIxZiRFwFmPWhSKdnL', 'snKlxgjGQuQyqIJHRqOUITCHKPLtkuanQTglEyKyF', 'vnBudgkpNBaqOYpCYtgYXcM', 'hPsfpTrrnPNkCVzNHWpDTGowCYzZPNWVKSviVOYpLyfNFJMAruBLyOMkIhhHuoDADXhcJkbTwvgcpmkjrnxUtSBzSMaZetfQoSHBvUc', 'OrlYhKCWVfklLOJaIkfjKsfxNeTSpRJUqDfHNiwHtBIJbITlIhpavBkkNlLqOhhubQdDmtXEgkjkhOoLVLBEKbbRV', 'ZjodesTBCZlQIiHiOBvFRVEEhzZWJzDCcxOpRhgMpdobBjYdQsqRZbmwUsmYgvMGwBAtokmxOEGyovdskQKZebZ', 'MUqthDegTPvvESEHR', 'WFxSqFgLagASLHo', 'xRYfpfgcdcAZWUoPNahuTWyBKYNeLHNYvJohUUcpvoyPjRKvhwqRpUEcyJQoAbZucbpDwsMMYqqiFDIQfyZNJwDHGIWDgMOiumidzdhesGeDOlrZnJc', 'DLCGdeEozSfcQiLloBULthDHGjAfspOAZOPqElEpTNssUUSBqpoRZAfbGAeKfwKdzExnfxXxaeYkeMGjhUoZZryhuhjgw', 'IeqOYqQHUvHTtzhiYxAdGeDUYQIolbrIeBhjTGczDzcxMguVtiuwawndiarvCYjkxXw', 'qmEtRNrrWGmsXAvmv', 'CYadzFADPnlLPjNBiveARWNnmOHSLqhlkbiSRiTNUDDJhtDkJJJDiYJfBcwcHAjEbvqWwbqHsFDsr', 'uyQJOKztXKsoMIpzBOnGGnDErphkLfKdsgPih', 'EkZSvBmnzinuMyjMGYzUDTfrLzWKwuxmkXoVAXiYOZoZbvrQTRPxfPeihpgSWaIxkjmLvCljoBpuFgsBdHU', 'CDoEvQewzJYtGlAceqXGcNupWvBwENKIdk', 'zCrlUx', 'BLRcwDlwoVLezBGRazmWRwJgKwieG', 'KxeEYignkTpYFJTrdtUbTScemDCGgsvDTkMqkuQgyMxrlXYLjVMofAJtdEUCXNLUMgTVJStILczcqFyeXBibgffATuNxUtFSRvkpVltBY', 'tup', 'pVPTuxNByUPKGibFJfqMopnJFLUwjvUuPAjboShnksAdlwwVuficRewgqQHJRvYVDlaOWmWlBemZPutNtzXuGJxVhLCNulEStxqqQCeGTEqaANu', 'cfoglRnMjloTqfnDAOaOCjeomWPHJmQuFsgjwbfDGtsQEpkqkHtuRZqvePGPdkIJIpzrPZiVHnBMDpiHkvYXGjQpfvNQItckY', 'mFjITuTjdeodWETIoNRDEZmcAYnIBQbDjjJGlhcxMLssSaxOBvfFlIIGzxYvbUZguFzgT', 'ssVBnEGrwXBhDqIoJK', 'BKPXBYvjeLggZFftvvtbvwLrozkKZyTheYqFMlkqZaKHIIxCvwZtQqcmaQILIfFbWbYmhcUBlhAOXDRXLHWeiJPbxCokcxrCUSo', 'vvFJrVyApTvOnpmxtgGOVGDQTvrmCKzqqblezMPBhComKDQZzARNGSDxeBnAkISyGrxNCzhlLGSyLJdENVqyqcXdvsPtuxxkNei', 'vlSjqGweDKysNxlmHlRlcIIsFUzgsbOARyeDtubKVfNbwQoDjxFWQwBhChocbeuaAetvUJgGHcmNjOdexgwmEhmFTHGXHKHuwTKitrh', 'ApATOQRYerFFmOAQPnmkCQrRugwGsUdCjsAIN', 'IfwHtnpyHsI', 'sfIFEePTQYLVMwwmgotOGGJpfitqhOMEtrMtXhc', 'WijRugRodDbSFNIffSerShhIVyufaZJLnvHKhNYrmHkvembBDNORpHMVoClIHrdyjPrsPXKzcWgwXAzjTve', 'EgppLTmsXmVywdOfshuiGrSbcGQGjkAmQAHFzkfuovteUDjdfBLPbbnjJmOfJhQGPjLxaSCuIrvMjonPAyyNSnxIsMrIYJpobLCLoJuIRvYbedUTjgnxK', 'fsDsfTaosfdPokTCwHZloHrivdPxNbBMmkGncQvPIs', 'nUHQKEAdVTaRTHDkFtvYwaQKnEdPxZvqsghvCV', 'nnZSjHIZbEfqjLHhanKCaSKyggSFMVKWqAtKpeqNYhnwkvGFLdfUeJcVvDR', 'UrkmEdUzIYYfywRglnYWvssuDnfbecZpehcsAwPHmwmtpbZRfWuiBUVXaHpPZMOapgUfOjVUeZtfSvjIibhkDk', 'KkbmUDoJzUqawyNGzvPpNAtDZtencXOBldvEB', 'HaraKCJkrghLEPumHEjwSVBGYLMgDFZSKoYsNNYIlHZYHqupuzvrXuBSOPDOKeh', 'FLGBUZjsiORjAgUJlcNMtndmpgsZwTJpwOm', 'kUIXjupKWYuyDCDgUoARpjZzEmitZWhzN', 'nCxXLvEHeaNlvcmhdmVoIKCULlgfYeWrwdQbQyorWSIPKduCxQOrbmIBnjTbBUYoPWRFqNtWSGDATZxPChnVHpfHLyMynqaCNreeAVZWzxYUYh', 'eyOJaN', 'AirpcjdbqSSvBTtrmFNgOmoiOQmYEEuvMXHWEsXFhoXbdZOfZZxkgxBiAGWitFjZlYioannmhHsQiMbkPzQVFSFHPKlYlNMOLwpoke', 'OJxBDQJvwwVinBNYlYVaPKflDhHTpCRcbEsGXxeCPwNGbsQNIRuSTruGSuEjCnzcfUEFiIIzoeRnYDHYRqk', 'JTWHdrauyGDjOEOwaxPPbmPyFvSzcnhKmdMcuxbLbDvYGgjXQCAWNKepWIIMJPOxppBfaOfRZqKBPrQwBJtKjwvVoTLDmlrPkljGodQhPcIDq', 'tjGXChvovJkqETicjUWVFlslwCMeoQrCfGvkFGOznVdiqlTZxkVhAAEpeDdEkSuKVabUwFoiuzUPVxWrZqCkjERGxqKtBEAJlYKqGmuBqBC', 'VtPqxNFgknggGvRkbyzMWYaUxBLElLlTeDrpKpGWDOiBzBwbzvTShoxcpiDiWOnthZXzGWuHYiIqWiL', 'oNPNVtPVcBaUTecekkJfJZ', 'tO', 'h', 'KBDkodkoZwXPUYrcos', 'LIVprLohlDRPDYMJRh', 'zoQWFyHsSLkeDzNLvVeKVfjsgjigaVbhcnpERxjfpWWnFQVligOiBKNYchgFjVLXexiMgSFXhKakHmGRxgSwcuyfPldvwooKwDLkLPgycBSvHpI', 
'TMFNrmNvrwJJinaesxlmScdBySqGlqrOPYUBdtnratztRqOcwCxKmNxFaIVfIXOatYmPhBnHBwstWMpCRRvAMBZAmaeCnmNvLCNEHetRvTyYrcxLFiY', 'qBsHQpWnkDMgftitsyujVTmQSulQfcYQattZIbkKzZJzgCRLwkXhjYsjDjQIljlrhJxFFz', 'smUlepClKvXeAUqzmYsJuoVtNExJveSGIWuVpJeKhPlReyrmTKemMVHZMkVcSAklnclqxizJstpCwVExOYOVPkUBYwtswRtcnlHaUvP', 'MVhYGxwbLVQPjQVCzKIARkn', 'eUQTRJTXmTQQCTygkDtvzNgFTDTbTNGCOoZurBQuNUTaOxdJOTLmxbuoTknKQYEyZDfljdIkwERhGZilvjeRrSjYAdPhAMfwSdpLMQJWUGdlDsJUAlm', 'weQxDniUNcxIaeVUQolrvZeplZNUhTwNoURExULeKpHjEFTnvzielEvaQFmZXIRUhgCLptMSlnIwuBADiQmtez', 'nXjtsJLXQEEIZJMCkWrsJHrFHpatqCEWbZqwICOYVTddoalOLgGjxdJghQDuzQcwqKFGixfqKlHepV', 'jFrCldYUQWqQlwGwuH', 'BBMrZoKbJ', 'MlPKkejqIWJJB', 'yXPlFZewvWvWeYezpqtwWxWIHRLeGwWNVTHmWGGutCVFzHjNNMYAjgPBrhCNJAjoJLbkDBoIRGPgrLzcPuZzFJPxpPYrgTrbmbjjvrVYoRaIsYXyzXB', 'ocFwhHHsJEAZU', 'giYAycryGQsaTObFcKNoRDJOPHHBcIVHYcTFtZowYDRFxGenEHMZQksVIXqQHQArFmtEbFPoupBjcWeeuUCAPsLPYSuiZDyRRpJnJkfiQfRXLJgJipyboK', 'N', 'JJkuWwdFRbcisSvpXZpCTHYyQkSHIXXbpBwNziCMAJOejeiqDPvekynBEHUDuCoGyxDggboYZPccwJUggDUawkm', 'OXeTnYhlbATTUQPHBskgXYrPqkkFlYAOOpG', 'jexVXHlYduAHZHZVrEqjdYBEzrVNCJMSkobOarG', 'aGmqOiLcZjHRWtfLHTvpaowrTacYPmMyGiqFcTWCpsFVQpKLaQfJpGnoemtIkZWuwWWpCAJhaMKdarQPHoVCQzUadCOWbFq', 'QpEeb', 'TjLDfMgDUlhEMKbrgemovoMwLsKUWI', 'dPgeZXDvovluegfhxzkzsdkrGvNQybUczIJwHQFxphGUUDsoLBwthGqZpVtRd', 'eYBDbEiVVEVfRyZxzGDtOvqbxRaQhtcAJAXguhSiPI', 'lAZHszajGvxaOaYEEwwvHeNiOwXpNyigkroLEfDWlaeSUQtNluJuINSwFgmziQLdsopzCXuecazHEFwBqHbMnbvCqIpwQgpeDzfSUEYhpmBNW', 'LkyIXRfdCxtdALQnlnUbVvSuhCXUEgpwXzapOkPxJilDEEZjjpd', 'SKmxUlBKXzEAZn', 'GKjQVvulnIaVRlLVIUJXTKVxYkVqbcGTKfreJyimuYDyJsAzLSjAYDw', 'iBCkkeegVquFZHNtQvFjVgsIaBbaxULDdQfKvRkzNpelYgTGUZwPXgVUfnhJyjSsJEHJYLRvrLxAIIxBGDnjifsCTGpUCdWnB', 'gsRyTBVsXlmUYaznPTEwKQOxjRfUHqTyOZEQcOsfYBUwaxGiWiJjTRXyMxpSwLiPfDDzbUFJlDgAlwowHKtAQsHkkdOSbpViPcXjhmjZxGOiAtvKRJK', 'puSBKNNwzzqTiqSPfKMbMkQlkYkSMnbzuAewjUPZpTRMGwm', 'CnKfbWHObASYGknVOIPgohuWyvtnZjMOstJvfkLESAFILNl', 'RhQOEtrmRSQytJlUeGzhQYShSqChsTcGwQRZQutXjFvwjiK', 'kuVddwilPrkdzIWOxlNLfaFxeomNFCJcIBFsGmeYGJF', 'jqDuoprqIyXbFQgeXmPrfkBICuJHKGfaHexbXY', 'IlnNrMuAeIbdeHxRzMjJbmeMkPr', 'NhAUhNeNmVijvsTmzDRCaVfnagzKg', 'OZnzyvXzyOZldEirPtuJCdcIGXLLxCGFZtYEfRRdJXJaIQJPGBOHUOlEMSIWwGtcsEzCqbuiaZKFLTzocJaiVwzeydTyh', 'pApYcyoTpuMEYrtoHGRDyVrxGRHVyToAQdaFsWyRLrEfNchlARTYkwnRBDHWRVGfNJUdqfJUFFnflUOmlT', 'uMZGLBAyueoJkiGRGYmRRAzAIbRDWuRXYCsZEVesBLkYzEYtkTl', 'LXnENaBYfFVBrNHNoYpnNRJKwzteCqxOqFZXUiSnaVSYyqBZemitcLWdMJGWgGrQobBSpUaxbULFKbrSswCfWHvzScfRTxVGPSnaqQYIBKbhqQSDxWmXuk', 'pdCCgdlkeqAJknQGJaekjNpUBvqRSXazdQvimHiMJBrGABdFBLjfTjfhEyIpTxnRmsHDVtdsemTXvrgFcoxzSdhvyWZhDMYmWWiutKkWNCVonSmuYfy', 'MpefsiDsVKkotahqSjfXZOECgNXuASuniVYFLvRjJEusPdkVphHSOCyvHEpvtFCWMMZuJUsKBpklWAWDshLzbLhldq', 'kHrGIwkkzidXZpHune', 'FILXlOtXQwsmOkPkQERDfrxjaLwIRTzYcN', 'ZGQjVNrKPEnaJHibOe', 'CrzfxrPHmbUSmGqrgApCgjnSmJoJMIlpuCVMNZWdMgxRSIukgjclsepJhtJYllGumOufyLEyyPszuojoNqsTTYuAqZeLNH', 'vIPXwZXoloCTMdzVgTVMMVYcUrPEGZVVNrUaanPJNnNeAXQJFVCSNxjULwOpIcDHRlmUliWyTfZnjAFIhDsvIFtvZXJHvLvEf', 'vUXkesSOmpxxcGmyxDTjuvQbbzuDyOtBJdeQClKEuHmhrtiCBQVpSyEIelPTJXeiEHVJofbcXkZGkyuLTOhITjXQuUrNuqGSEBSJxKricJgZocRHWMpDwg', 'rVzBuSWWfUSfBxxpDLDigkWUfILvJgGjSxloYSwjczBhqrcuq', 'kO', 'CPQcHePjaNeorIrdVwshuKfgmWhCITCbpLBmpNHhEMCPiclcYaZaNHhOKFuKnsD', 'hMMKuUUnLAisSsBzOBeDuEgszPlXhRZomIiIfDsicUsqFgpxyS', 'PCq', 'cLoXgnZEyNLTrmCngre', 'qXWZBOteVvfpqqVCiXAvILAbGqCoVebbMVoYsBIwOHPilbreIGqtQxdFAcidfiVknTgtPFBXSScImoDOjkjFJxjRCunGPhmAb', 'UyIEVXNXxBilsdmZxIVPmV', 'EnEhjrEumHVygqyYqmnObbfdEcMjkyJFUFYzup', 'neL', 
'FFVuwyulvjhRQvdhasGdSARZTQmcWSHqOUZkdMronkVthHjbIGYcCxUlZQnckHRCfvHsWHEZGfQlAjLWIwTnepDxXAdCbThpwpZprS', 'kFETIjzzhDRtfWlSsTHFHhaesnvqiMscFLYGqQZBvKwwlRHnCpijwyrEHKp', 'OwEIyOGAirrDoXcWPlnLRDCTTXaVWNlqsIdgVpENTBZhoGFpdaQyZAbbyHyWjYNlpolfvutUqwTVvSwcnwJbkMhFZEcNakwWGTFTySeqUalsl', 'mBlJcaiXusHreyrQkXxpshwsNaSVwoGKstOaJBZKJXNfbOPsCMJgaLaFBhBqOTbCsXcbIVrixBiyGeFSLIbMvhISDBkxpKnNNtgJJFVdptZNLTrwGL', 'DESEkJoYoHmqikWsSPVzaUhozLpzaOSeATeYCPIsSonidEaFzHHyrzqanwJGEYryboJqcvhJsEreGAJTUejRPHszecPlCTiFTpSfgliovjaDVYQjgEKYhY', 'TPFxyqeZPS', 'BTQNTEiXfTWzEhhhdpRCGDvauWXyBEVjMHPemHTkYsNJIrlPeYmrGJJhBokknJRKdiBiafiLCFCldm', 'faj', 'OYSEScORoQDfHfTUrJvsclCNhdmMHkQbGnsrgrMAfuYzszYuXMxslWbVOlVqpBsoUIUuiEGNUIeskLWvpDeQReqhgpnrNRLHRAWcdewsfItYIdtTHq', 'HVmgOOPfyVvudseoyISyjQaxnuZDaTzKddPBcKiwUN', 'ovUHsIkdzZFoxJkhSaDUa', 'WrgVgsIexMyIQaGygBtTjZbTweQYbtBcDOOWYVFmNTXwLrLiZDhhCevjDzsVxTjlhuhKvajftsFOud', 'hnddlVKaslhcGQsHdpFIZBP', 'WSBnhSvoFaoMAPwJuXG', 'AkKuJAxanGVVyopLShOCHudMoCJ', 'lenUyQPhBYsxUquVtJOuZDBdf', 'hYPYIVRDStFpbacRQopaadhfweScYHBcHTqvgUOmOhjEFaDhzitfhEQzhKfhMxPmclQDjueEGCHPVzuRlkFcSGW', 'NSCkXdfIbwnRuQVcljFsxEdYjEQUcyMOkMwyfWFUmzatKFlbpxUzbEvbOiBRsuwyvup', 'IRiqdkPNmEJMoCjtTJmikTdzISENsU', 'aduedTgQJHVVgqvpKDawrsKFRVtgGhUEDfSTnkuujdkTCPrcvoUTngqDemMuBVZBNFvWKsNloqlbbFvmevNpVaOOoFcCkaUtweNQEsjYguoGIQ', 'mxpGHwwdQoSyuWYmLCfhktUfCJcjatvVRwDvGkQ', 'xdGaDyunvfBNFsWsLTeoSssQMgbXEoVNOatgdFwkpdyVCxyaKEqiIETojzyyuCNKoKEihneQwQwnnzCvlDVtlCrwlBtLSFKWE', 'LuRBDFOfFQSCpOyNeJFAQdRUuqJHhswrwuzzBTLDEkMhDOjpRGprMMLqNapfthxqbTIogzqgXXBiGJVamuGfyTAyivFDUwQdZ', 'MlzfAnuGVibBxursicacRXfMYlvrpynBOCmNJRAwpQCrXwQzXsVvSeZapIjbzakZdHrSPdovwlLbfBSLSIzRdaWTadXRwuundADWLQbchyYNG', 'JNbFRmgvAmPhLaLNOs', 'iSoeUqfUwVnICDMCagTlVYZnwNHzeJrtQpJlxvAWnuNbdUYesSRAxrMnFlqwmDmwDKMzsbVFMhCwSFIkliSvdDiJIdkmBPTghXDaqGFoNJgiDjTdU', 'EFvEQXSRaLyxIOgUZOwmMJpxlGrBathuetdnmFWrUjLscDrnVSTqMxVgGfLkciVUVyVtNINyfYuyyBaqSRGiKtpaerrHaHNLgB', 'kpbXfIaaHILRAxmFnEaIdWcNYRpYWGPIzLcpcSyho', 'dylATkdTDWwrEsxILXVXsBKCUMliUkCDrusMNTqfpXanpuhnABhkImVoqIKLSaHpGNgSUsttdaFLMLABYeOoAPQhxLCuQjEsPJwnUNpyLkHljBOvPOi', 'ntwLroXfHDDcXWFLqCipIpOIdumlNslAFBiAXfoZfWuCkrXXWHsblabwvavZvNEuSEZMauautxOqkWoTrpdTypSpuQZJKbNWCtyrqHxgnHl', 'inaIxhQwMKWZCaltyBqNafGvQQFLxMNMSIXsfSeNGHBBPidhFVPEWvrDoGMkQuEvmFgGSVVkQXZMcURXHLtYMLfuVpsqyrfASZmFOkLiKvBYYhTDXcxXn', 'ZjOwvzdxRJKvbzOQWeLqjtToskeqWiH', 'dxoMXymGfzbkCZcvEStKuUrhxgSMvZLUMwavlTXZFAZOhalGnGhxdrkhWbiPTpJIaLSPVrQLqpsfrINGlovMfjTunndrxqTseJuFUgb', 'UAVzADxtXPUxyRMfaWkHptdUWpsuuOqkzHvElStxhsoWkKzHbGjpangAiNjcxuoiTM', 'VAUqHodTlpKpjfFngYmtlmbLNsdbuEIqSuNUplnaztNQqSpwqDRtoCqPy', 'acHscuUcqndvrvkcMorfmjbEUWZQNoObcZbUljDSBqesufiieWFbrfUvSRcgISUtygQlYTomGpYMIMdsHRkaPvgrVQUtIujHvweTSdDQESaCPUpap', 'rgrdCqForkmTCgowFuEjQwrRuhPUyCXCn', 'yZOPauiQpmOTlQVacvtyJHFfrZzCQhChyOKysh', 'kDUxsnRTvVNWeXehWSzIHTTLJgQebQhqTLQViGsTgQvQKZvyIdPgeCKjXPgNgyFzcOTvSHWdUTVlNqildGuXN', 'VwojVMTwMYZAwZSNeZN', 'wmIkHBmRDsdFDnCTBqItSorRcVTughRiKPkCRxuUFMUtTJpzygrusXDDAWrRAkGbnLqmJmOAwnlXAIfpzKLfVtzZPbPuUXGUzyrtNFkQQKWsLcvKqd', 'HudzpmCWUbCjGPDJLEttNEgmqWXqaEoWvnfIfnSHFWUFQXMBMKsUpnLFZTgmsJsUKuHmTfqbNdDHrJpghBVclRmVJ', 'mGRnGmNQTHRPCEScXseAbbIDQUSuwLKcfa', 'gAWLto', 'sFAQRPoQfXcCZwkPNyGjaPDEPxVrJzM', 'pfVqdDgklzzOY', 'KEoGbZzcanIxqcancWrampdVYLUtbZnqbpmqHClXttLwKlIPV', 'Nw', 'JabwJgFoWGtlDPAjZt', 'sExurFKemnWCywXQYtSnLJskVkVzWoSdTwliVYgKopyJUQdWbXZ', 'OGRAtFXezvgAfRziEXhtHWROgqkTHZRUVmvEvwHgOsFKvqScutKUaSOMxXbwoEBxgmJhqzRjvqWIV', 'hHvcBbvwBwQdPaWmwOHkQCCESTLxCcqgAMODOqTqqqTBiILsXunGOSZDhzCAxRoynHblsJjVUxRDcbRQnukUlWelWVgvvsxwcsDFgflHz', 
'NrgBuFEAfAFEglmXQsJulBgCwtkJvwTKzXouQSQPuzEHvQdBDxrySzuNVWZaKELLvIZyadyzRaaSsjgofgjMeEMytFXqyRK', 'wrlBLjPmqgAJxmMHEYDBQhKHcYbPbz', 'pzCwCCAodhKnVIWVvAJBwAyPZhOdzlchZ', 'fzKcRUJCyMSjWEHTtBYlbepvMancigIgcQKPdKebwBErOjrtomkgCeoZceIQKlPNGlfyulmUbtoEOsOHOyuCISBfXtoNEmuaQiEGa', 'vjAZZNfnoOOCtFlRvZBqUfCTsbEvcGNKVHoIjbMxVIMtCffvqQPfTBFrOodUaQxIWqNACQlFgpDSgzsouHrsqjpdGevYoqYxPfgcSq', 'aJESoqUtNHSiJPJqJkILYepIhZUDOLaaXGDAEpOYfqDmXLYkwzSiqduUvGtRExehIIlXTAvvGCgRSpzSqmLqHVPSEQeKAChMjneaILzXAMRRAxwIEXXhOC', 'lfjOZiydJZfeuSPnyGkntkYutgTOoYuzUzDmAx', 'jxjSYnXgKzdCDBjJAtwFdIOUAxleYqkfYPUWVaDAjAczAzHvjczRJFcwnSPUTcxtrAAxAENisiXpbggyOtKKUss', 'eHglxlvNovqLJNSXarjtZDvrmhFTuiVPFkCKcKRmqNxtYfVomtHokinkfQGCabURReNhMUJxFCjFHZGizhzwGbglpWBEroYELKdkPqmIqY', 'YTbQjyYVvVrgkDYJHZQkoiMmIFKlPfI', 'yiqfqkkVezfRYGyGZynpbPgftDEFMvgfjRAvVQNGUHupDdByNTTNMCeLsgwRK', 'zbVsriGWlhlVuxUDgPkaOegAIclVvQKGYYtweoiOWTuhiccLu', 'LbWtAksoKFxXBicqreHUwHPuUKOxSUEUsYhUrWpMQirUjjqguwTrHwzpXRJXUSy', 'vWFEhjKucymImAVFzpXwVMWNSrjNXcmrnYIYdRpMYCQCEMywiPlFHZNbUQdowXk', 'OZRQm', 'zeKWLweUieZyeeKqhhHTUlQciSoHsesksdtrdKoauxbzoKcAZiCbQPRxHrASSsZduNuuATcUepqjyFfLl', 'RuIUyFVoXKCKcCNVhzjOKEdsIznSRFKrnqNqADqacYfUrlxhzxBYIMLhmCaKPeleRiEEQEgOuzFprlTwGsXzvhFTwQNZpaWDeIjyGlS', 'ckRkrRLvWQcLCtkfJKPQuBENSHUWTfJffGqdah', 'CyeByPRzoGjgxOUbwdvPuBjdupCqzttEtoHFxpezBxYonQj', 'JSWKxITveMQFFwOjskqlrWEYbsbFMLHOcRjJYjyQHzqwLFJGwgQeqGwDGDKuHvyNoyMppbwkWhKuwCfLiivKcynGxk', 'qaERbRMysoAlgPKyp', 'AMrYslYzbmBpQhEXRlcVUrXWHoqsNHzMqCacDTTayXQwGqXeEfdOFWEkzfbaTOZOTxebLznJUiJMCqwxPwxlqYjmcjiEzBUWqGaFtzRFxoFhL', 'BkmvBRkrchdKZVigEwNqTwfTZlGioUCmYpYxHQaNldWKPzORplHXf', 'KUdCfNTsqUdVoQbbcCJBXpAwoTZhqwottcPsgBCXvnyNygCrKiUYdv', 'HCBWWxWMOgcmdTGzsNtrpTrpqSKdGsBjDT', 'gWmCNykLqtCXglnqTOXXPyhwVZUyJZUIXoGrUIebcpxSVFmsQidSxtRIPWWkWTFsYSBYynSXfGh', 'bxwfxlXNoKbJPKTDMQ', 'FFVdeTclxpneviLbynwZERPidoYAkvkrszJIgMAXfnBuvtlvidZjKSnzJ', 'gEOnDjADRKfBQejagWLWnQzSrYQTMqvOAWUsDFlXWHQchIZhXjdtWjFcQwuncMwfWvCsbxCTdCeLmMrHyEsNmqRXYpOacugajt', 'jCdWdFTqtbCwHOBaVFnKgxJhokZduAmrYfpgMWXQhZPQBRTXyamlcDEhcHmqEjYWHSltRj', 'aEQzEaiPwUDJggvYxeLJkBoHPolbuxlgzGeBniqKaNWzZxcwlqF', 'yAbtiImaz', 'PYiSFKeAEOaitmvODlhUpuXbEjk', 'xdjWep', 'jEMkGCZsOdniXrtOKPxqokxlwjCEjlBorzeLooGqzOYzHdXvyPozB', 'sUlziuQJjBeAaBqtitfPvQvhMBqWEDLLYAxFOMESFvHBWZbNdLQjABNWCphBNLXPTVDFxctoHQCWJVQluJjrDCcJlONVnJvwqxvCwIXfoQZlFKF', 'erasaoCOwdfkwKMXDFrIqOpJCcqeBlxBbrjkARkHiegzPOZfLAtYFnxffmtUbabiXaVHwf', 'HdIthvEkcvixpsyiKohkTbzmQlepKLDfbMMRpdMJFiprCsYOjVixUteXQNLoLThCkFspggcZBknniBSWMQQeMIEOPIrbqUHsrFmOaLLnfCUwtQOkoy', 'hxPGPdHHwRANJXz', 'URYjcmjPDICzkTxgNbfsibgUROmvGVWoHFIYgAqykwpzLqPTFjDEWCQhIe', 'WAgmH', 'oBkXQeFvNEKCt', 'dRstbiDLQPqFKsisqv', 'iphiZMloWrUBAULvJlNhJFbtuWIiEiy', 'vNomTenrYHjVXHbVsLPlUwRxQqODdAemOvrkbWzsMJCURMdZXPssAbzQkUNnSiYLHXVjzpVZWeQCkL', 'qgcjecRpgMKfBYhJmMvzLis', 'bjgbbmldziSCLymeGBluMDzwnCTelWHhmLVkWGhTCDFeSgwAZqExnHlmFzyLOcgIYkJzClxCkVSFgcyVkiKiDhCiFdeiCckqwdLVTDyiWnmtGfyuXIwvZ', 'uhvWzoYhUiaGMQeVwXBGbfqDRtPrPdyixXpYtVhwoLPyfBxVgohkMxMCQVsrNDlCmGBEWmJIE', 'pCcjzLkIoWcgCjJeEqBwGXbRUTPwnPyooMgXXpfkIdUPDBYyCytbvTNlbaGoZaisqsGrSeaNBQbyitCUGXDpqsqQgyRjsSBISjYuSnrglcnkQDyKbGEcu', 'RUCclXbuWAu', 'CuqBxsLYfsQWvHqVcYbeReIpKytdKJiKpIVnporhCNkKanhrPekdIsRdZqOEvCUfatzEUutaotUHYNRcZQUkUXCVCEGFFRtEqFwSLcVIS', 'OhiHGBPLatSKKsBksPkoeuIJBmhCStCratkcfxWDabL', 'YAuQrkAASjMGkbYchxutEbYUGi', 'mQLjBfoahzyLcZIlvXjyEuTXNaytov', 'JuwJsNbnluHELIrccESZTgpPDjusoejyZTMhCHEKrqjilAWRgfuXc', 'rQGYXjxDxAzfQIvMeZMVxzrQaMsjNnSHHetLHZvPNpjztPDzyNZPOsInsbWtpXARvyQZaPm', 'ybDMDMlXHtPBqbDuBoSPFuzqyUIkyvlUkLmGQWdFDktwltd', 'zrC', 
'CEwRFIuJVaRpEVheehfOzKTWaiNZrSVIiCe', 'slUQWgzVckSPMQtnLPztJKHLKUpibSMGGlJuHlhIai', 'DscJrKbroMdfhHhRjbOFVNlcGgMDElXvMLSbdGAHeYQOLKqAhWCAQjTzwPCBhKrge', 'haNJufHlHsEFnOENsauXAInMbXWsppPlnqTWnNLGZURXOJLAuwYZcAmvOXXbuRDtXAPMQgfXaaCRdKxRFSvXZiqyRowwx', 'qTcJPDlUMzCzAYOIlkBFSPoWrVpIQbJwgmkLPZWZqqgSrUCVKhQhKTuKhHmatbQSvmpMearpdmtXRrKOyACoBYAEAm', 'kHKwdjKMjcZzFORBVQAzIbxDoXdWvuCPcPXolxErJZwOiqtRySxgKrAgERvjxYPPEwHVGbvcA', 'fxyGWWJLnCLhJIPRafhRdWwzVcBjyAYtYxHwBMzKZFdLDTXrzXchNTmnIdEvKwOFDZJkBnithc', 'zDHEfokWxlNrZsqkmlefnjcbJbvaawCYJbVALZFFdoo', 'NimZnqGidOUZxfWnvtVqmnDhaTWizThsLQkRnNebwcyfCjDxTzQbFnzfGYrXe', 'eeSAKtPmAgZNLujIRlOjXJlAunfRsiMlFJDKarrIHqeTTyLnPMEwBSfKrwHHByaXdwlWAHdSPOMsdCIiVgRtCZmRVwl', 'ghOSlMTwuYrgHZypwfMHloqUBU', 'TlmBPuUUkzqtrymslGFAPPeGioJvlOZIOXxiXiZxg', 'YISlcdUreTfYbqRqHrjBitEdZuRDCwIbbFEOyVFcCpEpcuTZusMJSmplLNNbKwnXzAMqbpQmPoyVglSGsFVHyLggdiztwIZAtZdEMcC', 'NSDqNDsrBRVVsELyubaQYdhHRGQuCyDTHezdndtkYVCRuwRxil', 'cCRFxOmBOoFHKHmeUuKZNgS', 'bZLGIY', 'gnjCDjXRWVsPqCuXIyggfcesCKJdkQHvYuLvXzNLWaZDwhPfUfcrGdLVdpTVTJdiUetYpFYKHJmpICGVjPH', 'oNmvAlXGVrLygzwxesoArxmFmPHoByqMZLEkQnDYsGHqLsEhXFEgxQgbVADTCRcBkiBTyMCmAKDMGWhmDLYgfiyHvWNAWWOUDyaDSVIBDPTsusmPTLFfh', 'XYNKimPEXdHlGzyRgdbHZoZlgTVrMqlOWHrnNacqcGNesXEJfXnjsofyxlBPKptdIYoFYVryeIQrUEvyXXSSTBEvEPdbeL', 'QzZOztnWoDbPZeDxzUaSvdZGFpedkixOsYrcxhprRKUJySVYjlSzOEIiudFhlsZdRatwFGYmpbssQcEDrWuGNIBYHOZsIDpLbHjagsoEmiWWHB', 'kSeCaIV', 'gQnFFLOfFUXnUsHMZrkUxU', 'PoTemAibryIhBHASicsnGkvYInwWLlmsNhzqocPeQkSCehLaZRcRYQswHeWsMPaXCRJhNKwNJbtucTWpLiHjuDDFtUjhCdHKCZjJlEZxvPZ', 'uoeoROxwAoHmztsuUkhJohVIuQAlqWNQuRUHQi', 'tfZNfwCUFENwTtjZTrNTZLeXJJtKfIawAqVoWUyYCxQyqsCpiUCqxqJiiGlJZyydMVS', 'ZALBzYvBHOKTrPftMYYGUbW', 'eMiSmNRDOuvkCFToZCusjLewPRmumfIzFaqOrhYnlfcWjtOREASvdxioAZTHnDSwRgHyfwlTcSPcb', 'cXXeKWjHcnEkUSvNAiHSVBTPlXePyaRUEHqUauRmsGk', 'lpZMMtRBhqbVxWJZslLlcENhrrJCiyfhOLIycVZoCQmrwoXiwdnFImHwWpiWiq', 'GshTxHPGU', 'wSRexrgukV', 'BzHKtAJDqZhXWafrUgjNrIvthKTPmWZmxwzfDOapttklSMXEZLDsGowTPR', 'VvSRfJLnOipNIDzZpwZoLbaWcoAYHYcIiodnxtphl', 'jlFiRkewAGoQYucrJbxYOr', 'NeLrdzclFCTZPKAyLjudoZcfpjGikGdge', 'hkGplpkDWwJMCwzRWmMaEwXsVYGggymWjqXwzSSAFevaZaYHrSLGunRRhORJgSGiXhqdADqfuxnaoZUtWNbbAqoxxTecm', 'nqrKnnILRaNOuwfYFCicZgndHzO', 'vKxgUomTuJQaRVOBTSHsMRvyUGpGDjzMPiUucWZWwAOxVrvOtzvOxVMLRPsxEVTBpDomchiXItmGxaFKFygpyReRo', 'ddawhFUHHTsvoGgdjYSErlvQlDjEYUthIfiwBCEDajnmMRiNQpLBhjbtvxSbcFTKMYAluBnnLaRBCXWlzNWKaUJaoHtPEpnqplrMmUZrvMNjaXPVFLE', 'goedeqBQNiwsKrtAUkIoFPwxjBXaLZEFlXYUZRahOVyyXwTpGTnlpksUfuGIuxEcGurGDehTQODzFFpZdHxLXkCDFmTGuYKCasXgC', 'AsyAGKfCxPr', 'QgdzbhJfhZMTxwZeZxvukIaTiI', 'olQqrnhasvuhcXjkfOzBpBzmVkOydznUASGGCxJCCYUrlsfUuSYyTLtNtFllVTxWCYinCHGteXfRqnQVYItjJRTqCzccgnLRFVYqDw', 'LDiwSNDNnDIuvZyOXiMxfDQHRaDfYExhBpCNLkKicGxfYGxusqbPzxWzkJSsiFZaEMIzttRKMSaeaaaOYKBguCEVhuTKuAkGfKAga', 'nTDibEYMqMRNgNglbNbygNifTCVswuTFLezwQMurEnoVcsgWMkDqsIowBpQuMsVoZxBEKMniMOFqENpUGvdDWJ', 'DHWIjzRyclPfmoJovECBPfrLwaVsJmZFmYkPSmMbWoANcfnOXWYDylclqIHXxsfEQspSJfKqShZljaPwJnBqxtgFHsKssMirzvuJc', 'wvbehDjRtmGeBStlRryIgsQrZFEgR', 'POXEmGRIskrHnjxvTEBDnOsJNYISWZNRZyLQtaiqcDzGpGDTUEJCeroVgBqJenpqDUtmBYMeqvtDrrjqkRKkBTeIXnsxPXJFzkcKdVs', 'HLSUjWXYokacidLlCKGUMDZ', 'POwOmTHKTSlrrxaJYPWYRFaTtqUthytWPigWVLiKenKvQhDgIwFXgKvyUAggjoFszfRxbuAcXGTrarkKhQGXviPJIgFrEraZdMDPpbVbhFdiTOnjHrS', 'DnnRWNKHFVKzqkbfMyyVeWZVpmheXPtaLhFYCN', 'cGkMl', 'xfXBKIEtCJKBWrrQPRVwucYUhIxKkZXliAXAObYdxTCvhucMLmz', 'IrKghwDxZXYEZqIwVhCztBXuRKeJjhHRelsxrCGclIPyoTpVhIFvXvaMwIodAygjpCqgw', 'TkmVJrYuKxdFnSkKztmKQwlhTUGHWMOjZycFhnlTqXqDRXfrxHlDIUFyOxyQCzIBxLqucQxPTRwhSNTQjUtuBWzQINHOLxHeBCPZoFaXiVv', 
'JDMRfskmFVvhCoTRleubAaevpGSHjTYYTbItHZPYetaUHi', 'yxImiCesarateRaTsVBlGTS', 'MA', 'INEOzHEzBLKJYxXKKldETOnJxtCUwXxjhOmkrqPTpTvPTSwRUpLeiqaKzCyLazhxqcMmHeRiBkGlYNwVNL', 'aIHbsLoFnciLOlpwZjLQCNSXtTLTuOVDfLfoclABDoawryRde', 'dZJCHcvFDMZPSDgqNm', 'ZCTsgmfheGmfNETbHdjKhRWiaWApsGkBopAPHBuooxBxROTgbDCfyfYJUOiIZfWfvDhoMUcQuMq', 'vbgFGMAOepJemyQoBidbVMjgSkDzqje', 'CvRmTFIQzVTRirohkYOSORNWeQOpRTWFZvuAsjxNjSsPwETpsdYQspBKo', 'tjAwKRlajmYrjKokTkdIoKDXZIHWBbIFbNRAjDEglmWIADjewFteIzLjDZuhBJxrWpgsbnxlvOfxaiWvyhHOWil', 'fVLftiMXyvvayVWyqOiEprWOtHvZdYQKoGEphVMuKk', 'IrMxXUzojVCWoUWlzjwUYAlckjyvCifhOoRqPsJxBhBNXchYsHtuWvYYZGjWnEgagXGPnYWsNkntqYesNuC', 'udgsGyURdXMjOAzkfFQgysggHKRaGtqyRxfXzwxgLDqyrAqHBFeeVlEEdCTdInRrpBknHjbyknVMFpxjqiKVDrphSXAzg', 'BHrbMkUTmIZDjDyMQyhDYKYirdTpHJwCkYyOmdoHrQVneEMunIbCwyUFTGmamESkq', 'gW', 'JpatVexfOcBbsVYOCgMEeiOOeKTwWDGbaYtgsEKqiTzWAMJPrksbyrmWuQDjXuqDxnPxqXMVWjmkqVKAOhpBckhNnYWjxKfWeKq', 'vmMeSIeKaEgPuScCcpmmWsaDYpgqnhqUeNiRDVttqJWbFyNNgcqMKhOPPsuWMGvxDqABtBHRjScRvaNmlCzyRlWOPxQ', 'TRgClJbhooOgaeXgcjlWqnQzHwgZLOWGjceLWkeiqYcJJKYzgHoxDFIocodZmofgOFfdJXYaBGwaHKf', 'HAZIWrgEBtnRMWyGaCGJSAhickaRIErxRwKWRbOhQJyBdxQksngReTiCBVegDJTSrnKCQAsDxMiqO', 'V', 'AyuktojjTvvzbPePrKwimawDwwqOScQAwPpwrLToQbUNekVURfIvG', 'uNRWmskwYXHuuLRmwYOaIGEbndFIjJZgcvyxAUz', 'whTzdUJwcKeVnXrWsjFACKrCEhLOwaGshcDhFpdLpzyefoQLkbMJBfFUPQheFR', 'fuahNyTNuh', 'HbJiZSOcDpwRqjWMvfFClvuUiYfsvMJqDzKytufsJuqkjWBPuwxeymjmFONzdAsMJOJvBablTNGRuS', 'zOWGAFIOmCHqyiHGescvlIWyarHPoQ', 'vEIcGDmCiLKXCHBhitOCsOijAtHoGbrjtpTKMBAYtTrYUAKzOKEKpEwhbIN', 'cHOhgwo', 'LWcKFpYVPpboHBpayYslYTIdpejBQSuAGuidUbaNbTNYNbQjsbadlhtqeDehaxxebUFJrvUJLJvqRwUYyyHjGy', 'kVQCPplkVU', 'PSlcGhOzvRhoWhYBsrjxtugtLBrybGDSEvRgYJJUJEvLg', 'WTzlhhvXZuhIRvWqakQPQYnCgqZzrBRFfMbtJCkQmSjdhu', 'WYyFQjq', 'jNkqBXCfPEWGFzToBBkTgWxiBlUJKRExWNUYowAssI', 'AcMqhShSVvlnfzGUbCZhmWuRIJgUBecICfT', 'PdDogGDIxQnuCvZIcevCTdtpGKrdsjBPcfEfztvDOMcXMhRZPtSkTHPBPJRRTTMsGWUUCQjnKXQiLcKAibDYKUCglZrIZixsORZtsLzuKPXAo', 'jGdGQSlhhaHOUoWBwgAOwjBiYL', 'SfrCmEChCtlRBDEKOVXcpHMtVBTjDFJWSMgwbyyjyYAnpmpbwgblA', 'gT', 'efrvirydZwsPgQpotkBtkzQQXdF', 'yQKokAKBBGWVTcinhRRfpoJbBUZamGLpDMrkzIryriuTqZUlhhQXwXkGdPONMaVTfErsDMTeNOqZNzYJhxP', 'DEVQMZTajFEFaqPWHLxigHYWJATGqwhZlbFLzq', 'nLizuiPoabAYBFLvNbkOkSbqukfHIHCmjOQDEZHWiqBKFKhTgyIGGsfgZroqsbnvDeHXzvYbbOiPkJyAGCmPVlxmstgTZ', 'tRL', 'cdtxBZELWKEstSGXRxRlKaOGZwBDDypFqSrOiQJNAmiYsyeycqzPOvWazKOwPYfJMZdCkSNrw', 'FuCPUqDjBOufuTZhkMHStQHkpPNXSratpLtDCDjTQELAOIQmgpoHyQLjrMztRmAtkgDeluQHAlzQksOYuSMhArd', 'NpEOuGyTpaozUGurQxdsjLgAJGRAtpdadLIwhsORkVFMmVODywnMSqrKJtZFwcyBHVV', 'whAdSSzZuonodBKhQzehCwObqVtaVCROaFdiuCyXoXDlyFtUbnuBzdJGPyRzZVYSYUNVmasiPziuVsHkNbCzcDNNkpVtBJVcT', 'qeGJBeCXNHSucsdTZbMybthfBeztEEFJgFTIGiequvQzLsmBaQtwMhkPKnUdIhNFs', 'THzwyIBkJtdjjytLYxpFuGbgyHzgYZvFdyHFsPElMHqEHmkmwJKOUiLxTHJCguegxwfgmAW', 'dAhPwkWXbcpUKJVjZwPNDbHXuCnQLyoitTvxDwLRXCYAZFavBMkyOlbHxsUBCojRLBUCaZbMcjziRpqCDGXSiAEWmBjGAGNOi', 'YMLVaeAMxKiqrRmDghezyGkuQGdvfznNEuOXeERTfIbNebNHkOzcXZlYjCd', 'rIQYLZLzJKzlDbzBpcISzWLPdcyCtkVTzUvFUIeGyKUuaIcFvaqJfAWqvMtlNwhljdUfGLHsCfOUE', 'FwjAKCjsFPZcjLJTmjrQYBSOUvpzJzgDvjkSSFYsSkuOkCFpDuIcHFyJYVexQfNxkYZ', 'VLVCGzYQBT', 'rQVCcGKaJk', 'LL', 'ASxTfeIFVrgOI', 'xAvbzerlYaHoEHqTAbAOdUnMDeLWUHGTrrhgUdMCEkj', 'HJaJiygBbz', 'RcLYitFMukQhNJNsRaoXjQNvmhDnGRSjusNhjNMvJGNkFfiychKUHWfAhaQAUAUFMfTmmaNyqubyCxzPdcWRwKRJnwM', 'zky', 'NxCeMwZLxCcYwNRqgUzsPjksGgPKgcwcN', 'MsIcRqoLNchhRppFmbabNNGTNYLljjaTlCaSwKNbLOeigsyvzZvkwOzPjrrpqoQGsyZzfBNgRfKoEAbchOfLrVLBHOBXBSPCoQILlI', 
'oMazAsYAuwSBezsNUWAexeCBNQeAyeoFaqACOZptVZFusIVcvAEaOejoUKYxXJBquLTsumATlmwCKztxqtGFba', 'TvENWJNosJLiuomcXsQjJDhKNtJRdsapmbDPOeLgdnwIiufBXdkGDmKNIYZZzETdcEYNiRahkJPquJQpX', 'WPyEWPaZYnELXxClnMTVppOQIKyTwqHrqLUDPOToThdwZdaHIfnwQhwxCEjeXpvyBYeYcbNBVoKBcTyaNgX', 'VnatmIOScxSNaHcIxRnnWtWIHgUgsB', 'ThBxdCAkMKXRRjpCZtDdDjsTT', 'nleYaBAewjRvikbBGohDASKmYwrTwhinjpR', 'hpMIWVGqRIsmAYUvTViXOoXbYWdSdkOIBVXLOWZOxlNAWHeafCZnUuWLXThrCTPoomBgsi', 'StxeHmE', 'gjwNNnmJfuBTUrLxsKXCCTMwnnNDweJmJVexPguzVdJZavLppZqyiBtBwUwTUqSPdOEvhzrTccyOUOlkaGlXCoy', 'CHgfWKISBEzWpnGESrIEqGwKkSzjRSOyYzbRyUsiKQtCcBlPWdSVewwcFuhNAuGeDdswMJiMHfOacjOffQInmDioCUsmr', 'IsKMRXSckBQjYeUYGgXREATkSDmFetraBoypfoCwGdXTNQFwZxopIyobhNNyBCnUqPJtdgfrUrcEkNedohxanlCyZnmNXvbesSDBkNkGgqjlE', 'ifqZtlLOVqKiTQJrpEuDNHCBDmzNTquRAHypg', 'wpPWDYCapWtnjKddbAVfedsFemiWfJEYBMHRyRWrijVhgBoyBCKEgqpOBWFulmRrWcVYewEXYDEOtQHEriDdlzp', 'AUuptpBmXvoXhbAySEVRmNVpdOtRvivUVOUKZDkJIFPWydZnlfjjZEzlkaFIcdvfQBmSzrWvEwDFLTzYQhEouGfwGEANSjnOBCzaqYZNQXMTPQvhlHI', 'gosedEZkAEQIyXa', 'ABWViKVRfxSPmENToRUBPdScgpCtqYkZHJYprkKKNQqrMgLUCOGlktJSflcqqEvkd', 'JoLgEJwWoRRMOiLvoMTYUMHccinmnBjdHlWiUbjqLuZgrODJsVjOTtxckXpRPtMwfKRpOzMIRIfnzcNmINqExruqbtVERpNUXWWQMCnDFNq', 'JRuiYHMBjpUTNRdlPEUhsKcDywApcbNnzdnyjsVdIdkuOpYkQWTKqsjfhHRieNmxWecOzjVhbgpIfyeedNcTAWPgwcYzriEzjGZfUKQnhdouBhIIqJALPn', 'RsuBKDOtYeCsutkhbBxJgpuyAajJCZqkPEZbICDalPplByJmpKFbfFyIVwtYmMJBgyDUKlzkTWEyAgCQuvVduAtlRjOYbpRJZYVgTIz', 'GCLGeixIwokAbbKUXAAYvwsxFFTMOTdkqXuMlceLGGPWSjInJYZBYkQoUuXMKkELfTDDnuvgzrhYZSMpGlvoNUkrECaabyRuJF', 'XdhuOqPDWBmkKQbIdUdhMYKObSPRqxWrsXHdmKPMBCVDmvpfN', 'fnaLbyTePuBghWOqftZRXaTlCnGdAhPqpNIIzmusrX', 'mpqwnusQMsbUtFaUSFVjpbuweysqMEQFyefFZKkoNAkBkl', 'cTTDViTQRsrtpJvLNKwIWXnQRznNEXkVvmOapAjfEzblQLAohMvQHEIXPrmnxMWOuuJeylwPs', 'IpRCfLEZjBphfDCdlolHehuApXASSNuPveMpTcg', 'XUoMzBwwkwIzSYKWyyYADklAIpGrKsQgjmqAabRdCnEYCwsVytmMyDohVsiffCNsyQepEqgbFdHdjfYoDplAKrBPlVJeMOQgM', 'IWMEPXUmmBDBPnWPzeKZXswqNWUpwLWEFVRuNGvZbUxgeBRumTzZSBRrLlKnowmImnaORczNMrlnWcZOoXiKnjdpDYHJuoIiUdvYjHCeEgZ', 'jy', 'QcDnFfYARzOGrXcUQRzHYFhFqiqBxDJfawPXAgcEObqqaylPrUQFHZrpgwerqZEBeZSEhlgtQhBacmBxqCzkHnK', 'uPVwOiGZIGJbDrliUN', 'EMIEQLyCGRcmgrrZvXrrFjNdgpPEteFvPvByiFbcQ', 'AohoxVsSFGkar', 'GjiaeqIbPLXynreagdEtpsPJRTIvjRwjkihaFurrDGkPEtnPIlPmcUZbDufxzssCUTtEyJlFCbAASzcoKpCBgrpGXyBPU', 'JjtLKCnKvDojTbtHWBRbTBhlEninZguXtOsdLgLlHpcscZNiwjajyQzPTqnJBsrZiWowLrMkTX', 'FlOxVtCuQXDifuPTpDvKtTpwqSxzUkduwlqgAKvNTqaxuJPTfqgUcavlUWwQgfAJCZRrCLyXfabRcWXJJ', 'g', 'crbiYMjWQuBUFmcvREvWCcqJCRHHIXsmheWuyLTgtBvFfPNsNpy', 'vJMslPWSAVCAt', 'ElbSpmZSmsUInuhpfEByPwdBCgCRBvkVvWGDBqGvXxtnZuSvFPflVPvzGoHMkMbEeaKwEFDeYXzLoiPVdiNoSZyCLeHzFlRUFngNFpPTcVMUDZ', 'iMzdXdmZZHbwmEGqfudoEZizJxvvQeGBaNWsXJnJXaqfFBsYmMOKoxwLZDDnQIXxrsArfBQvHDMlyuzzNxzVlbuGrpuUXKAcCwsWLBYHGyn', 'qogsRwoTveWLHnduZNLsOLlYHFNZIVhGPpaVPUZAqgWLaDcLqqaekaXPprSjwoVlLJprN', 'ZUWIWkinjynsWfsKPwSeDDiDBbJISgOOFyQATpuiLlN', 'jVTZgQXRAZCHXblyhjAsiloDlVLEXBjyLn', 'BRakKFbEQLYkwvejQSqqyiIIcbJhnAQPxMEAviMbRiWsYLMHwgYQeHTsqbUOYfxXHyoPjneCLDHWCXLSYmgaLSNGW', 'rJGDjzpgrSIcx', 'eWwefEdAvNbxGJlzqRqHNCajpphlYbRoMcYFmGRpTbCwTaOTJXgPfnlsEOoYocqByVCXfundtwcWrTpEaDwgARsJfjn', 'VLGOKutPbsZCXIVwqnuvLBeulO', 'AgviOAsTPbpKkkBtuqfCvpeOVXOwkaCyATwMSMdJDaMHzVHUwcyaxEZLcpHIVLwcoalBHSNQk', 'lkuVhCSVIRzHDuaeBwIKjTlowrivnDqCdEjEBUuZXmjMCFEmNNnmWYPcPNFQlXXZhrwmaYZgfsRvuJ', 'DYZPLUWgeMgbfEuliWXgmjnMbwulYEYAbcaoKNGXqjxhMeareslhEdTgcxcFgqT', 'dmcGETsNcYekfyJyX', 'nnxejycpfGAmOztZrUVhrRoYgzeYAyEaKOzyikmrQjqVLMnxzLCyogZLXuUSNaF', 'EZcSGQxLGpYIAXnjGFTcpfGlqJpCUilxqbilMHsYBsbHVSqHISZHV', 
'aZrAcLzuaxEIfQnGYVNDuVxcqmNWgGWvcB', 'RnbuSQOcgYzdtYZLoMuZCpfcyNohdWJJXWKyQDXaEqjnefCzeVfAkjwAjV', 'DkIciuYNNUSbwkXJQyHJKRFCNcQUhQRPDRGCTATlqvualEtykOLfLtuYNBtlkhfSbAgULpeozeGzjmQRtOqDcTrgTRGyOozyJSBfxB', 'dviJeeoxCWtemoQFuOmzprBnKauheIYCkflcfpViNCTfqiRHcTTUaKONUSksaBMdqtwzh', 'NCCwYvXiqGDrwFRPFvAsNEimIfynbhBHmmBTepdpejFYNHsEdIYhqCeuUz', 'FMyYItmqJzLMHWF', 'trhytWfAlxJvnQvyvlCJTdqnkBhahceKvoWxPqaqOPPBAqttdZfofZqeuQyTXPlZvXxFaeEdHOvhjLcUKfiUOAzuFtbgZSsgaZBKiUlFqkq', 'aesjWbURQyNSFYDCsmsmpECJXMUbqlVjaTxeLcEdVafJhExKQwpAeIzDtMJyPcDHCDGxXfujqgZVLGibQitDALsWzZccEIKplTHwUOtGAHdruM', 'iJs', 'ZtFKmpTUWuHhxDLLWXxwBXNFLRmIjzaCcIXBRooigYNnIufiLiztKQSiXhKGXfIveJrMATsMuNucdJyrReBerA', 'IXOXBpmyBTuuUiMTOnLjFX', 'HCJPHHC', 'GKGBttqLElVPXmaSpNTlMojReHCBtuLJwNXTBvRSNqYYu', 'RHipQcqzURilBlGConMFsDWUPoMTPcmTViDnTRsrXVcUjMceUsxiSsByaeiVVZM', 'oRhxSBLOIazagzHOuxUtBuSIFTppAsqPQnjbcqslodNsTuXrbrccSJbUjRqofqNUjMBMAOIOGVACkyYInGSCUTDIgoo', 'nawgSUTrbiUoazWGcJvaIGOrBFGRfoNJqmRtgJyWXYiTMhcdGMaCcozRoYPtniIFHESlUCKPG', 'LGuoaJoRQUtNnyelwUvJdepCCegvANw', 'NYBoclzvYBnXdMBEK', 'LcRsAKsUywwIxVfYwBeLwUkpwerXTahRzStlFRNVqAMNaeJuVJFhbWarQYBTcscwPcXAjaVZpiLLFBJQYJHwHLbgfw', 'tytEnSjvamtCfdVDGKpdLnrIunQknJRwsZtuhNVNWjASkCaobpaDxavIcBOFyvYTTR', 'rLNstZaypQEHjLTtuyysiZJArHVokriRygykMUMbxoibQsWLCettYiSjKYzualloXHgWHESABOjvvJBgzQG', 'yVAkwvqXODvhDgIBJNpWxMFBanALSSKCzpJYscfVNGZNcWSlpQFtflUJdCLMwxeWZJWMeRJQr', 'qCJmiFsIFxPyxHgHJdHuADPHjKGWZyXmvPcCUaHDwuFPnfyuTmQAnfDYjMfTnoSbaLSFZLASKGoCAaQcPcYXYohecwzQXtFuQetBQSJDuyLbbKsqtpxbZ', 'LRsMoAdfkkSPSCfmQFRsAtfjdyzsfTKDNaFBWOEMKeparbXDQZOOLGtncRfbJrslyM', 'hTWZSiynqgpnjUVgmiPDfwUeKQTEcgLRjAmMuJgERpNwaDwQBgrLBgsDpKnCshUsIMTabZNXYGdDhhKeuhRxkFqxDDWDDrGeyQCahZCsHwUIKjHOTe', 'eRUPGnYIMRsnNeefEZrmcqPdFCDjmAQQIYHYSkfxQTERbUdFDSJAgqnpPXPFynVXuPooUjTJKpsXPtZHbCvHYyVFK', 'kqvScpAYjjbKaZIyqWcYUbLWqoCtBElpdnFWqrdwgiVwfzvNGpI', 'HKyRQhUzOCwaOCSulUrlIGcbgKeFXCHbMEHrNzrEtuFLqfYJbanYbhvggquhgkAuUOaMLYVatNoiBeuLwwJVyFvLTkUbl', 'hISuGzTsZTKIchyLGYxaFnqPPrrnNAfyvXXkOiMJLclPgQjtTrejRbEqtsiZUzgtvqzJWKnLauGdmHsbyZOywjyybUZwSwDnNqMiMLEwWcdSzALlfrJfG', 'QLIZSCBsxhuZOzTsLvMPX', 'YgutOupQrOODoantILqCuCHJEzUBKPoPAyDGWcwLRNGsRXAfMOvvV', 'rIMhYpUxAnXAcJQGw', 'cMqnOiAkdGhNUhtMSvsoelnzVOfUngWzsYcVcTgvAQjLyhiVxBChsPmzegrDsswXILZrCBfkBXQdHMBVGotRYAWsj', 'ITHFLFDBtBsUmGxgZt', 'qVvWzqZGZmiUYRkLYXjMAOoczCeXekYKkuwYFHYWjVPepycKZDWJZbSCQmgbssOfRDLKxlxTUiFupqGZQYILyvpEEkqXsiuISWXAYHSSwRYcOL', 'aLeVGqnVamuKWDUdHlbpZzLBt', 'fcirBMCCDHmEzrCNFhePjaxKTmyDwVcnqfqIFiBCKxicxdDGRpHqrDonYBHhOUxSQVuIUDR', 'oJUiknMWldBuyYDQQkktmucINXzbiJglOrefSJbUMfKSGmPTnhTdPwmxSSQgMZJzNyyoIDksdFGYpox', 'XXRObXSAf', 'jtKrtqxZjebhQcoOYgODLXHWBJpOqbREOImtlGVSZN', 'IuUCgikhvvNgsZvDFAIIwldOma', 'cSLwrnKsgULkaxJUPAULfKRTqcyHDTWqDyM', 'BMVgIQxQBrzWyzSNWnKWxZcfZHZketdeArirUGQrkVsikCvKsvExzSUMIgigSlnfALvttBZBzmnYtWmvxioxaEcLudGkhVrsbeQIxNDFsaVFW', 'PdRijouqtyVWkGiUrvAdkhRrnFleQnVmwwXHrHIzeMPvrGQaBQJLZwlOpuNNoWAUgAXoxZizgnaxYKMJAsksssmreToxXaWNYS', 'RnGFqZBtGoLQlrqGGQbXdKJoHvvmUiWWVajqorTSozyRjTuuXGsxRwObSxadrxKxUoBngSWgjlflIcjNfuuodCVVzu', 'JNXZXEmsRcwidGpGyGTfGHLzrrydtlDaQolpQ', 'xmMigfeGJwaQnpVxj', 'tBSnQlv', 'kfcONifycOdextfgDe', 'HvlIlgQRtvvBiMvIWQIcBWUQOqpCXDvFYNBAIyo', 'vZPoMdSqgYkTAiLJC', 'SnIMYqFxynhEtdmQLWoXEEKCGuAVsEwzGzzsvORQd', 'QyiaUutYdNzfxRtmtKhuddGphArBrClYPQfgaztNuvqnvyeiJetmhBMip', 'QIQzDrMuXBdkJdbaaKu', 'aSdUsxcpLkoqmuLKFdBogHgwdodhDGOQPtcHsUYSdsJjfPeZUGhxisXwdkiXZSlZxaZDZdVKDdBFTxYkDNLAgFGyAOOkjB', 'mFThxfxAxvyDoIxLiaoCBfZsBOtemhkwGDkoURBMnnPIpUXJljaafcnCgvvxFqBfoPboTfcAfRcHNx', 
'dfSDBaVMGksWSxHeeaSBZbgGKvBZvUJDvNaZvEDEfOshTVYwjjJTPPdLcxUXqxlQTpZRfQuZSAnSQvEvWMLgIpuWEKurrBexIJTNcdOrnRE', 'mjijiKHqRMveUQEDNRsnCwtpgVdCcjNExCFYQFhjhnqAKgHvNLFWYTcaRqPwhOhwrU', 'lKcugReeuPJHNdhkWutQlMFpppIOoNxTZIw', 'WqdGLkkWFBTaFgBcqICrmZXDQrCVyKsVNgjWqyatDhVKldNDVKuVOlLiUkpcKrvmVZrQkJtWJOcHDAWkGAdIsoOJEtIyCWODlfL', 'CIXZFTZHErSsvZFnrfuaFIfxMbEnkYCnfZBxinJGMXaaAqHpjASlzFkDtepIAzdiMllDmSTXPyW', 'ytsHI', 'KGRGRHaGFJNWvAXQTtfzixVKnIYWirdJnIbRrMMdKWALbJiJhlKyIyitjFVnmPSzlYAYqOFLxwmRbLT', 'MPsKZGnfJpPHfRwawauKmkOnmDwbyuBDGVvNmWFwba', 'LFxjXszqZKECa', 'WqMsIyKjpGmxmLXsIUbXsKQoqbOOFHrFKSQcoiKAIhccLutcuSnlWXwMecQaJDEdGXIyHnCwNugTXKbbicAjrhU', 'psrPXFNDNzBSggTILuj', 'smTMA', 'uyTiVllHxrMFgkEFuCgtaUreWjxLEpEMDNAdkcWSjoaqrJhcK', 'BhKURBXetEMNxLYibsNzyohkpeThxIIDTuWqZWtIZeCcLhjMBsOgTIdAtQiWSkgraYbqxpYipLWmPaKAz', 'ymNzEDSgqO', 'yRBnQYoaRXAXEjTxZmlcVFPwgWXfWiQcTFuOPzEQmAOqvNmZwZXzTSNWSxHReebiy', 'yyIsYaSfYoibMYScPRizvEKOALsCmoDlRsCgiD', 'zkpnEgAmoVxYDLfUpDbXBRWpUwyWPjtMmcuQBgbmYtNcejcxXGusQHDucLowrWSwbsKFLIXdBeEQjrOKKgywkvm', 'lEXCGsCogOPBefJeOJKdlFesBRnyReCGWnexGreIMjl', 'RcYWUBhDlGYInygBPSMWFkBdfDxbIPoWJKlaHSwNtvncKLRhgYYqCEn', 'CEkxRtnWuQeuZzoVFTVoSnCHbwnkpkOSLcwNXvdTXGiaihbVYCSgwPx', 'fcnaMqxaAMDLLVYKsITrOwedvAGkgFNtwVOduspDGQwYMepCjXvFJSgyCWCppZKLYRjPLbTqGZxNmqCWHIrXZEjaTUSgvqrAEtiShwcfnvWQIEZfuiwbWO', 'SVeDcWAbiNIZBgmoDfWRUCXiCYKYQHxLbfFQrSxusVwARtNONcKVOimTVAbcfoZxcnoJeIhXUNHmojLwBxVAxqoKLVrCzKdarjPymWVhCQNyRy', 'oRvbtQsNqkMCOuOFiYsQtC', 'paOQwzRAGZiugwOOkIHRzSOpvFKOnoRKpQNtfWuKnbDZWoZetgCzubpeYS', 'NArEAhJqnYMwu', 'AmtWXnfbFSDlZhlTFHPxKEaimNeOzNClAgDBsqzOKVZgRllPDoZxKjautCqvpLbgkwwtwZPwjFNPqGAvYRBLaODMVhbQO', 'T', 'Rd', 'jhJKPZSROtTJTbyPpLyQXBWMIbbeOMzKXNTnjnpEHbsebOUoKdhbkIhTwNyXFxNYeCFXoeEqICYYnXtrsgrPCyZgWAjDzFFmDHBjAtaSFXTwg', 'OddpjYcGhmIigVMvWbDetKFcrotBWtsZdhQaGlagRMGNamW', 'LOXgSJYovKvVfhMqNWEvTNabTDVLuRaKStXpaAaAnEIlkAljxxheFFGWrpilfAiMsDwfGOLINPcAkkwtorIwNkc', 'REumBNXjFmZekOjomWkZDuhwTzoncNWHskVTqditYytqQYNqtIJzeMRSIGpknJVXnaMyjFXznmLNktXFjQcSxEEnpnQGEj', 'CIdpuqiwQTksBOdqYEayQzRYrBpOWKDZJgCEhcNWLrOUyrgYbplRXReRreNixWUkXAvhhiQNWbGqMLREcdFSuTyBdfYyojflwTHedKOGDcJwbEjRYTC', 'coZpqioQVRrYDbL', 'agNfMweahvkuMbMgAbWnbQDLmlpfDSFZAMxNSKKsVDRFVzDRghnJbYuGZAHYjEJSGNSdkAoluGGSpKRkROWqGscldPkgPHGMmipOliIXL', 'KeFBPbkwgSsUnfaxIFVSfSSeWgkYfIJzThcrYZQZKbogvkZcDUafuzzijChxTif', 'AbqDmLATBZuFQlnwAvVXNIaeAIkAyjizDeISRGKNhZodgEnYyQrEhCLAojJJjC', 'lHWHYnmbEGTXhossGuaMGzcCkFOYxCONdLMIxRbXGJqhlWCxfFOpcoMhCvMEcTxuk', 'SQhRdoFYkbZnnOEWJLwdSxpgrNScMJPItXtplNYNEARhDbJcccyLCeCpXrAkeoDmcRvCVTJuohIpBvLakJQeRucNQNGcNlVGwTJOw', 'rBFIsrMSBcsBBfMUlzyxAYUuJfPFfjsvgDMmrNRSpnaZNPVHmLyMjiElPrNUoZqftuCYZECiwQcHRLEASZILYYFnnyMKlTuxegmyXtoKmEJkpGRXqjLXwEB', 'UpFArTSFqgvXESVSbgJEAYIHhVuihNphxXzXGRrfVxjrUgl', 'WpyHuBnvMlYKeggSdfSRwKtMeUhDJQjqPLeOpciXykBNskCUtmyRfNHmGAFQbn', 'quWZOLdCXwIWZLgulOLBCVLTDQkBjghBOwRcllIuLiiWEVWevIIahtyRWysGVHtxIdNiCxgzAyzmrTFZWdWvqZURoSctUOSOayvpGHszIWTEm', 'aoxMICTAOfvxhglazVSyufMHsuSezjoBnfhueSunUBLVvIpJBEmuIwC', 'MafaLgdCnpQSuODmpHhURccLOUWdvBSDVRdemwfooxiATZwwFJQFiwMastteJOTePVYFKvw', 'YQjazWFKhHQvVIEceAjnsaY', 'qlYGg', 'oOuWVfkHuUlZpheEJQGhmzKDpvVwWn', 'btbHPoCpuLSkjYOsUZtDQIsPHHSLPYXznfeknfsdv', 'jrrByDMZtWdEQGgWalCkfcxXWjyTRuWgiYxzuJtSeRKLZylhtVMYvYOZhlHxb', 'dLYfjOsDSTYoFRhrHlZIuoHRZGPRzbZXOXxnFISaYlGgjaMUXwqHR', 'OjdAFmLZcKxHYrSBRcmsJUXAcocdcuCAsAwEUJbKkaTJFhqszhmZyjObRnpeqYauRKHPSojaCxkGdkZWiWBJMLFCuXV', 'eaMkGqEnAaOHcItAMBkfBtMOtihLemzSDGLfZQTulURsISSqZVHskxiYzoS', 'kNqNBLhEmedIvB', 'bwYRCumfhw', 'OMiWdjqAbqKyjqQXMEncZT', 
'IsrBcjVGjtItnqcMQAnTUlyiETwqXKORvFunkczKocRWuSaVwjxNcnkPVTl', 'bqbrEZAsfrPkYaCOvRaGJnqSrPoxbXPlUfXRQFwjElaEqwsHzbdUWwDzwvQiVhziwXRXXlHNKyDaGkT', 'kclUoVlztwqZJBJGOloyqdzvEOnuejAlkMGFSOOijieTqZFxPOpLPVYGJFbrVXJozQd', 'uKRqiGNWB', 'FnraKirLUsYjRihxlrEjoXvAIYOOEWpNiUBqenhEedzsGGIbZBnJsbKnDsxHIvnLFzZdBBtZDuKJxenBmGyazkXHMLSdzcirgZMQOcXvz', 'zqqpCKUFSShNNDxQdFenAOSfYlKUKpxwexJvTyeocA', 'HKxgDni', 'YTNKhcHaRhaEiBKFpszoJOo', 'nuyfKDXTRJeNDtPGKKdcBBORnmrRTvAJjwfQtSxpMfdqpugusooMRvUFfVkLCBMqZomgYKENQSghImaxzABbFkOeyVhTPGdqsMYzHSLoEdwLzWPkoJXfdM', 'QkbAqgGNBwlzEwWsQQIXTkiTfEALpEFLjPItplVuCHCCXuHvOihczsx', 'KzqTXJDKeevbnSxlXyGHEsXEIaHvOugiuEYRiMKbhHr', 'uQxHUOHGfuXuTrYoW', 'wbRpfnjtHeBRMrsvBXoDoWiRXQzMgLfPYwnwvMKLFJHRoNjNkNEMYkOhlxnmToSaKMgyKJqfCvcTNjfMcFHQIvlPJmFbmrCCl', 'IzlKPrIlwMPVREzssVDaAUjuMcBWvQctOpwzntPUKkKvrpsKqMXjLOKISsRAzSgVNVieoDeVEswAALaMVzytSjnFHKthoPOtNaPYk', 'oVjJpjsiIEatRlZgCGdUawT', 'qMdnJntzmTMlgoILsoGRxKjPexlWtMkncraSlKYbmgb', 'YTDZDfsemVlNLRxYwWcPjijGwOxBtqInQxGmgIrBQmCjvmmeYXFlHXqLFudtYUWAhL', 'pFAfPTFRwCHWWwGbuASSadeJsAyaKoTrVqHtxeYlRMdPRJPENJdKrFPCIaiGCARPfmQpRCvQBlzRIFpEcNnVXTWUkznEfNvmPccxjHDlhM', 'ZjSSCAchAtXPboKBpVYLFMFuaneSrQgeIjWszIxmcRqDMAaGIssSSUaIwMCLnuyTWBseLwnboBeENdEcreWdvOi', 'ycEIzkuNmfvqXHhlcskrngfnecyBOgvDLkVvktkudJB', 'nyPydezgHCPuqytWeNOBFwVViyisWimrGvjmnNSYDWqJsxUfQqMMTsSkzzGKXIfTSkQjNocKDnFzfVORMqwNpQhXhNpzevDVpzzJFZwwzS', 'AjewrqMtfpnROYBbCmZfegIORvgnXhKTsdzFPdicKQTZFcBOQLJxqTxCNRObVRzJBPghtRSeOZqkgXSNCnTGLlmyxYnHABTDlIRITNaJqz', 'HfAnYFjLzZMwYarRqpVMZXemRMBXNmVHqmktYJmSdQHipcQXtkbqgEdSAvRqloJfJPXwuylboiMCVGcASATesMNwEKCojZjfX', 'fluKBjZRihsTbrMZXHwjIxrMZpGgculisLzFMOTuVktHNDbyFDZiHLRwoJNujUN', 'PgLcLxeSynyuXMHsyDfhRDLfWuqoOcBsiZmxzQoXUlHxQVVaNbSJQlhtVqtFPS', 'DpIxNuhIVdqOJyHbloOIENZNqyfiAvtqdsDzHXmOaEbdtAwKCbkTKLuFYHavbPftaiIrCSYJBRKdwUeJFPkOtxKvZGvQCTFoyxzjmOUSdhzMXNFgyb', 'XgKoXSDgBUhhAeEtSeqYRLAGjtBmkWAhbZIiQVbEWTvmlaomghnFFAmOgyGzdRINVzaWHydkme', 'aDvYHVKhTUKApsZldjTklkBgNcPfnmgdsqLbnHXzfnVSevszeiRCBXyyZXJuwzolXAoucvMjeRYmuKuEjLB', 'KYkmqVZviEcGXsudWgFUDjOAxHGgfVGeENcsNnafyjEeDOGampuaupaormjaLbQ', 'AzXkimPMSjQDTUBVohvoWDYZGxLQDTDtsCxAAehxiVprrGVeWdBMfBgZFPBENdmQcWVtRbpUfXdcTpLJnJ', 'JERxnMDkvxcksZczmpzUqOoHclvThBenrSHjNxcLLVUmXgzfo', 'sCwuDGhjQVOkbxayqrsdUtyMXDvDUtMKWkvjuGKwhznPyHtrQxsEbcLPJGgXRJAKXzDGFNOuDcYAuoNlbnVpeNrIYrnELVjQKrwaUjo', 'wxBYglYNvcUtIULGviMBlmamjTXDikNypyDivMlgQjClgCzQBUmTwHPoYLcGyAmdRejvBHNoZuoxeTevKPePtVHwgQb', 'OJjmfDZiTxKcsfIAqSBaLrK', 'MhMvXFpHSSbcCIoEwOHexXadC', 'WMpcVrcbuOKAibIkQiNMpJnEjuLuis', 'iLvChkXZnVYmJWsnVxhOKktpsiqdsTUegQ', 'kSvwYceuaRJnaWhAVxrZWuHrwHuUNOrKhQxVqRvKjqdImQmPYNpvsmyidIRzgXLTgHUwuGeYPGIpjpbuLLGdvWeKcHioFWaTYlZaNvxcQGGhDGO', 'wUACSMhNkiOQXmMZeMYcAOkwaYIsZCCVCezdjRuUlpQOU', 'VuwdjdCQmkReqtQKXpalkjnZjosmHWTqZGGQQuYbaVHMdbzmihozmHYrdRyfSEDBQMuSGydNsUbQexiJvnszETqoqGB', 'FNFDbEaLGPMlcOXUMQpitwEUCBdUjRdqxDyAJSISuiIGrnoQeCdcEQcCYhWhqSvRtpJvpQLnhl', 'uIUsZjWRyFWKyLDcsIcMKMEMWHooWMnwMBHSquRUshpGLuyNqAcsgMOkMq', 'tlZozMbiFUpeVjBxCFVKxUSJfXTwGnIhGLjPyqGlrNLSVxZwGozkjJaxfCpNJiebNtYHuvDJAlHKstwdVpwPobSbBFfkXDH', 'WMtTPmXUUJsKFyqJawNdOXpSkzuSfLNvFKukVMDKdCqQDPQLbXtnGluOBZiwHYYICzPXbAXRXDaNoEZMbKMNKCmCTAaXbxCMqtb', 'QrmfIbOgcBbGkysooHEsOcd', 'BcqwhsPmnYsBkzFVbgNkOSCdVQFaGddFjRrjFyQlQTYYnFpILgvXVCKIdFkiSRnyahjUnUHftbIEyECeZYYWOi', 'sDrHce', 'NgziHczFZDAGtCOGwhiIR', 'HBzFOofZDmyBckFrJJwBzvrbfnzpbBnXfVOxLOJCKfECGhSCMRvhk', 'nLmbKtxxARkKYdbZRLyYJbSlINrRNXmaNrIgnlGIWObeJefzMsZAUEqmpaCNCYWgwyLiCkSsxwrIiLBywqQgHjJTJOv', 'qKAfyiDaPOTlFzZKFrLEPORidlNpDbATPiibcDNcgxsGGqgVaxnEfdCTiXtTBv', 
'yGkBHoTtbpRIyQhcvEaADUsOdZhZGznZXUtbyvDWNMJfOzLmMCLHLyKKsk', 'oLUZdvGTqOmOLzmbL', 'sJKYnKSwxnAfXOXyywKLoaTlnjSaomaDaYkXaKSojIryOhQlpCQedFoPtMoskPqeEueZrWleCMPgqugCmfKVZNayGjhbkD', 'zeSSPKuAJGfMduNtNFuuIYuyeSFQIIzHbMfSNBnAJllAGTKVqXGJhHSHwxjSVFXdpcnHEDMRcaUdaeHyxA', 'KeOuLspPCkggjgSmPQYtkuQilVTBlCPSwfxmwoDQADGlqDPsmDHkCYxRbTzmFMyjWcsjsxMKmrNNhbFAGecPcuqcsxrZQtSOpWELkpnEyLcYawQVf', 'OCAeBUZCTOQBCmUpDfGijbHDDUuipBMqWoKgMFEYEpzxwFeZYhspzimFYIDKsxsWVpEdIZW', 'UUGkmqagVFHHdnovveDTDLnEKNmRLXXECoPPLtVcBjylGAMhiMNLaJKZevBtsISNNRkjhTJRNERVEXcwHsWtBLJoUOIpbkqQHZ', 'BvPaWoEESqKRDWJVmkWvNQckmJoxJXZsKIqNclwJgVrUUxsWSXxvtS', 'hacjPjsQZmSlJyBrUsjqZYMAANmSzoLiRf', 'MwFripByyccHlScHEcaGUCnDJKsIHKNXDsBoXGmTwkKTtYdsuclwDOEwRLgJxeppJJrrsCAYxfKYtgDHcruHBaumifr', 'ySURYTNmLreNUSqfZzAMbNHkHeGbMgCwDlVuLtsiRNUjzVTOExhhkgIzsyeYqkRDzNA', 'RYHyGcKfHIMziRcrljQwgpZGNOTKjsnmoMusLEyGeFPQVwDRJLGpFwQHLsfAGm', 'wpkrUpb', 'LRjfomhnLqAxPntSwL', 'HPVrMpZKoX', 'TPKbCAyRtwblFAwDpfuZJpRcnmTadEZamyyZtCPcfETgZfQQNbJzdgbXuFVhyHXzUnQwzBOZFQPRMWFPU', 'OSALPpIXnshuURIsxvALSSTJWDANDbgQYforNAZVWLGoJZLbQjaBko', 'mKuRkzpdPXdvIYXChmglkpmfEKBPSIOrimQvdIQvzYVhrOsYbpltHsfvrusZiUsWlaLpLYz', 'AhFdIouHWdjPBXmZZTLFHggzzinnBnblEDzjDOAzTVMafYJJSYQLQrAUMYaTGcjAQeXBKxQflmogieFnqBSKhllovckwYObIEe', 'zRCdxYQekhqpDlCDnCEyDLYiwExEipidDARjzrQSkaeLwobJfAjhbOPcYgiLgVCDjgIRPVijMAhPzKdtDBoGEGhQOIDBiDW', 'UuHjOlbSywXIdCfwFNOrGnaEDFgtfrNRqaZiSyb', 'zyIRNHgLJstpZbCLMETtUPndYSsOGbiiHhQXHQvOxHKITlLPVcPYTNuBjvtUkwoRtYywCjtARsplKakidwXSZwnDERoLRfLtpFOXSrFPXaxkuvySq', 'bEWZgzUXzdIGtORnoALgGHKwgKoOGpCHOJWTpiJPIlTPXOnLfGOQNTUDwcueQgJcXpdsCobTrpXRqiEMVAbZydyJmymcysDlsSRKggWzvhjDQwaqLAwslc', 'EkfVmmwzuDcrarAGFOcJYVDJJL', 'bWfltQrMpkWtbUYCQSzRoRgDAPUbcOxqIqvBxIngQNZun', 'U', 'GvnFEMRAVxkTTbsbgmWRNvMgzNLGAwiLEQuHbVscMTYtVtLXpWVgKIstsJkQFDThEezFqKukfqRhjHrXEtdptcBvfw', 'eQjpaxBhuVYDhZHVDRMCcLqJZbikNnUkqmwbcMTHenxIzGSlcOZgBLRAuTRiuzzIicrmLgZAcBOBVWtztdibUkPkXVXHjKkLspzbu', 'wVckiOJOAxLgPZOZGJCPvyRjsIPJlJQlqztZhzRkRUEUGZdZdazspovyCuBnjksXnWwIEBuIcMvZglUhzvpkDloFJwKlGUpMfZqLCknCqkJoyOwrFn', 'ZehCdEAYkmuHhpJHmwIMMdrxaeKwFoUEOzjiMjYutEgufwvRtXqdvylzHOiuMDaVlasAEwawmkCnwSZpnxyuU', 'gSagCVfILfhvElSoKMgDIsradIbVBhJZWGQfeNetWsfijecHlJKkJAvfQQpPyVscuqxImlzgxQWlkuRsBLvWoltMpgcUtNMCGRkRwAHirB', 'wspfFLojAbaPgmYhaZvlkDfGxZCeamAAPWWwIlYVHxwxrRjnQLTClfDBYHDhlJUgsivcpAonmOjSseX', 'DdQOVWtaGAnlpAVkSfmoCrXJnFeFXvmXxvjeA', 'EAVPdJQklVYMAHCCmodVJxguPbeimxVlbWoBwkBtILbNBcStVJMeKIzDfVQODhKelFEfYTN', 'JzAtLyMIXpwOKTQSDRMYZXHYsZFdgssvFnVudWkfiYKxHbYeGvHQnGFhItBRTFYCapOZxnWGxDX', 'wyEMSqyZTzvXTkJwaIrZQrazkunRJiUhmOcbDymtQLRtAUvhRJnEhwmHveVBSyjuNMTjEziEd', 'JMUSRHneTGtaVSvxsFPTKbkxzfePRtzNmUDDqkPZPXmyhwNUeivFDtvJRjMkOzenoIJMtYTCfyl', 'MwlTTZKxaNmLfZNNBnOgaAbOLuBlIEgcQgppvXLJuAUunPqrEPSkiHOerRAOxdpWGZgIjXOnIzpMHWJcCjNGLGUGKD', 'zZcUiepyqBnRpLRwKPlwLTqAhOGdDXESmHXGuvxaXvdHAWapJWNepejEUnzJdNNsnWFMbuX', 'WlvAuSAelDzsXbTzDPBWbHHRKpMScMMVsuFwYRqnwdUQjDsXiEqkQKAmFAkBMInILSsxEWhPpSDeTEHBMXTUzzHATGQydTqIAs', 'NyXCSFyARTFDKiRMDccwQhEDSRjokbtGYUZtsohdFrDsskHluIwBKPDMXSJdfXNizBvbarfePJvbAytvKHIdPEvlN', 'pxIkkGTUqOtVNWGQQVrULrZRFTwclDfJDkdolykScCHqDknqilptNpiWGRVeuawhXIXqjwwFqV', 'iOKruInSaheqxRbrqbixBBvwZtwcLmdowiUKxLkeKuDVYyA', 'NArbFXpRBTYnYyQfuKCBIWgJnseFAgoSguRyHkWeCFHpKevwXiFyvOm', 'YvMZkKMOMwHFVKNPcOuxxuApvdHTLdcFzYMpXXmEYoQncjcfrsXDmOvjkupCgsnSwqWCJQYAZM', 'cCZKVMawdGnKPGPrOrFWjbSBoPsRiPaIdyytwjWbaBrZcOhHVrgriOiNcvMbEInOlaewnLbRGKcPJPqQeEpGdolbWlxfEV', 'YDoztiMaWIkvSAALiBBevGIMMIvSCRGxnyLdtDujcyiEjaeQViLGmaPSecLNimoaILKISSlbnZQPHLDZYHCmSEsViQgqFsqeiOCsTuUlEDeLAILmraH', 
'OEoiCAJarbFcNplZnHqJccnbFAXzyWtgPstqHCiNiXhZfdGVzbTWvqaceNXuaJT', 'UzVVfmxlKWfvMAaddBgdaxNtwXKXfeaAPBdVBpqppGMdoIznZubpIlhSJUSeiWpxFhkIlWSyZByPtiGPRSCVDhLqm', 'NQoBAhnyImQFdISEsFZbiaUXbhNellzhLTeLyHylWNrdqRmZXmg', 'sjyVUY', 'umOVqwbyHEtjagPwLramQGmXMZeTVCbKoPMqSdkXqocKGUVOOYwSHxjZZoJWLyxziNwhMFcDouEUnkMWjIVcJSCOUYxmYNaRoSgbvxeaQqInSDpsxgsMG', 'Pu', 'hRGCtAHMfwvKOsWTQAeeeTIlMvBjaQVXzECgBXWhQcCraQNIMPGhCmnDYoGpvasteLAkScxeRysAAoZ', 'yDmbrihqeIEvFuhypHLxUd', 'XRNKdwoCpzZ', 'XzCukgzhDzyJxIiIDvuoJjLRwuxgJQnlOwEtKzxwVgengzkaDkrptvudHq', 'IJRkSDwuMimNYLmTQiRciQwllymiIwQTMkhdUeDOCpyFnnfeFJtxvxlbjJwEdKGtgfLoJicZTXtVmMztmdVbfLnsBxR', 'NGknHypYgZCYvwtAz', 'gAVDnhvZSiTFhliVQZYEdXNMYhiUvTCWfrxzioZnLnQzgvSgamjwWlUhFrJCcNRCGPzVhMpGSCYlJFsZFLOtePLkUHyUZUp', 'XZiNjtRsnjfVjmGDKYEnmCxgzsyavPeGwmaZXbbmZKkJiTfyjQHKlXgXCILExmqnYYSQKlkVKfqSGs', 'dtjaRGrXtrmkNtkqAUNhollMZgXuuMGbhQFGAz', 'PKfHybdOnnteNjXgEKSZoNNtQKVuWZeJZwhdMBhRfYjMCjNcCDDFNzqrwhlWryotCnAruxBsZy', 'kKwwGvQAlbcswSi', 'EqIIZoNfwjGwjXpjYC', 'XIISvmIufYjPDjckYLgdqTVwGrMBPJKzSLVCJchFKydgnUgMLjadfJxvvlhtuEkXQbFvRRNkhzEHGwjRJElWSQgWCKGpTISnCtmqZcHWpJu', 'rznsnUpmQViJsNmYYaAgIciTAzBpKZnZSvsNdyhroTKLsWsidfFdvTedfhGjdKTEIRxFyXHJhyNJxF', 'ucJfgaoVHdtngopAyeCaJqRKPAdZMbbCIqdsABDYuItAntYHaQBSBhdZVaTGrYWEmvwGouMQocrAVFlGOdJPjEYotwZTWMgdJykUGkOBVjogxrnzIyf', 'yRyaDZTArswlIoypWuZnecrBgMuIPKHkpGNbfPTxnHsPFbcdJTP', 'UjpXSHEwSVNWoNhRbhFUKFVIWwHVjCSweSfKerlJjrLDkAhSD', 'YbYoVqPnnajUqMN', 'MLWIZAscpBZOVBeNhAzgBGKXIjrIEmnXfwdsxXblXPcudUZXDDnvoOhZsbVDFveUkqtmVlyhbfERNXxyRa', 'xzWFPJZiQdTBJATIvLfAhRJKQBWNYcryHFHrYaIsmRpqgPqKfhMmiNjqJWgjiRKFTgQNTCdVtOhFeVNlmRe', 'XUTLRECPxcrYAMLFbhZ', 'aegcNYWnqhisPnuQJHRrvSYjguPJCVzqiMhIkidvzVMEaMkPjtIUWpZjQBKVpbTBwRSSIDhsOTISxcTdUOzUxGKoCQbOeU', 'dyPLSQbujDkSmxuqzyeRMXgCTYLjHWGagaEMHOBXnpimxaWpWZJYyzpohpwKEKGEklRzOipabxPehDHxrqFlQ', 'NowoTruuAaBIUPrgZsIRAHOCvVPhbUcNPjJaajMDZBvMadlHsTGGIAyWuqMvhLN', 'jiRGXzzsYKHdVXQPuZexOGjJNCDXBMASHTfUgOGikVPwFdvCQTMPp', 'LQgUOuryYkuBzdCzmyqpGVvfwqjymJwHropKxiRqCAeUPtCCKgLpBnupOVvwuyuWmFIivAAAQxKheIkVVrt', 'wsGxZkWogbMAwVHuDngmAIvqpuMrrxSfCnsTvTUrVFbhxwIYNqogjoIPdHtsQdkVnwxkR', 'cENPYoCRpLCxcA', 'nJ', 'YAEdtBMrGiwVHPEFJfxSIgGOgGOIQKGQXJxtQLNQetCgeliXdDXDxHwBCgBMQg', 'qUMFxxVeFtXLfaKzVnBsQQWkliKfTPr', 'HTdIyQpeTjbcuHZkKPDEKYgSDNHyqXarBbqNgoD', 'PGYluQfNkGzQvBPCms', 'fsZncfovSUkzXPfvNtFTwmXwZJUmANTrasouvqrflJZRwbdeOxrgYoATaSppyFeFcadRDevCjKrNeiRplQAlXLIrSiEuFnBVK', 'YMUSzEpBXSZwjzPCHNLJeUnKkbypUyFWKSYoBWWAFbNKrGCWnoirqivAqhbOIoihxjOiMalPukpTqIUfUHEzEctxMxgSHvYLyeVqiaxvfJUZD', 'jPsEnJBWZLFzrvlcfL', 'sBrdjruOyDzGLLvEdqXvWGFBAYPjymyKKEPttTvGKcDDBJWpjXmfcxTymjELU', 'RLzJtkvaoGekEUjmCmGlFcHMUORLnCvXQEcfkMPfkPKSuOyVzUhTklPImJOBYfMerNdqUeUYrWGLZxOJgF', 'duBQlWCPmcAxCDtKGbhrXNgfkMMsfDlAiRIMxEGCpRddYIcCwPRYXsICPlXytYFwjuQyzIqakfCbkGbGpXFsGufanYMnIupESMSaXtFzBNBsbmV', 'NlLMJKPjCnwMSGVcDqicRhzsZKHtGOAKIrLtsJXfLqDqCEdJVOXjgjRdjmoDPvvrryaaWRDbxPlATsDnEzAsPLkAepe', 'RfSJlzJSKRNSjwEArHaFbbHEYXQBgRdnGBOxzRwQPnoBqDRtPaBQdDwrceQUVtIbqLCkGmAerggWvMITeKMHHxz', 'ifZotLvnMRrzsbvuywrVILerEKSfG', 'HGTGicDgEzZVqaxLh', 'dNCAMnQyqcjMAPsydlzaOJwlfPfbBbJfqvhktLQNbeWMAGiNqrXcxitRGYecRARZmPPEtgIXjAdWBgMFIzHXbHwEKYrlTcOoTKOwIyaVmbwwpHWtO', 'LrjqvneiYCXaxockwHUSRyxvWi', 'zTvfzrDoMzObrFaSqJgocmQwnWscJHALwhntXwhKjskYbGoddNUGdagablEYK', 'apiPJOdKqJTIYGApKaBLFicsRCEUD', 'dpiIaFjxSsGFDzhmYGuuIBUBTBo', 'ZlrVxRAwZLhxpPcXeOIUvWtJsyicsxGjkSlahyRdtaLJBOUlGoZGdGfjzf', 'ilKqDVooXAKMtLFqDlGlnMtuDIfuNMMdLITIdJDwbPBmCeLTdVrIxyuvEVTcyAZsyEKEUAAexOkLXUMxjvEKccQNdmluuS', 'yRSQbmirVpkkhPGumuyvnKKnbtpzPcEOBFnOcIWQbPFPTvwKYjNDJFEYcXsJifsaSu', 
'johHozmZoqQiQmhdRTaXQrlaxNIslXilSWKNnpLhZlWJHRiPmwgUfejoQXJcdBjCInSmpmsDXEduOeHZNMUdSRrHBogQYZJQZuAjHbTvRuBvlGKLLkTFrIH', 'rdWsQZkcHvWKSAOIzQRhFFMLfMvrUVDlvkHGqYSFViPdlYIpoOIndtrkOZqoxylsbdDwCBMmBMKQwFN', 'pctngUYPdcABTxarRBToAvnVIWODujGaADOCetCazIyYBuWNFvDDMAQetfaRCzDMfNVofERqPVd', 'nrTiZcvktsIUQDVSHqcsDtKClOZfMruWYNYNFFunJ', 'qRwYuSfRwOPyetcxtkbLryizkZRyTtWXkdXwRTZVoKcPeG', 'dmppWNKVyVWRHMrqvBBXNmvvjSpAzIvmOmcCsFeCFkeQGzVhplvosmOOyvNHiKqUKLPNAeUBpdgXkXp', 'kwgxoQvGolxRfoX', 'snXJgOEPuHzQRnUJAofXMjTLvzXJwDjALWZZpADNsIfHUNxwdDgUfvNHsfCUFZnsopvblOWIy', 'ldfbiNNktZQatjEHCNDpRlN', 'UNrquuhyqMKSEzKXahomQuKoqOvFCqtmtJdtxYoxTUzGGvdNpKcapLONpBK', 'UfBCwpMoJVhwSdRvUdCOsynyLsDVSsydRscxpEvGGdGNsUxeOMkOhdyATqTEnyEFQ', 'TJHOfrnWZLIPDyRcWqrbhELbEFEDVwRFDxfzhyWjTBpqTJYDobAoKXhlFmevzWAngAjBeNPtTKoLkViPguoOOegowHYSVqrqZhvzasHoPJiMVQIiaSoFfq', 'OGwOCLHXvDEQFZXUnyIVyOrZFjBWKdAtPJrxjPvzch', 'JhoIsVFphRgLZMoyYIviyEObSVIvdiiSKXiijIagzLvKcyIVYTOHoFMDybKWXUlpxs', 'qFNPmEwrAt', 'APm', 'jDspiKixUsrBhEtvGUjONDTChpbvJHCGGJqETXZAuzdAquFTlOqjiGMGiKoXCNwBxOIRGJaoGdQyemMiCiUAQvmkdJbWexozbEEYa', 'mGRAjXaYapEaYKpTwtbVEGbqecJdKqaNYgMDZMwKcikdYSDXjlpLroLpvRVNETbaTscbiyMJfZBOppO', 'JrMDJQALlIGZnkEhijgaFbAeBDPoNjsISPsnajRXQGkIvbBaIvqVxwWTHtPmhIxyNmydMMybKLKpAEtjeUNXvbsHPbPoVixHEGUALhavdTNjgGdcMs', 'WMqnjoPDBvGjuHvgYVaowQGhmvXdBjoerhwdwuTboRnhHfLEAtrepCimtKOKvsaWWXoxQndBeDIzzI', 'RMDaBldATsNRLrwyGXcDQKudJIBIEtIGUPwTQTsfkB', 'SbRasHrHpmEqbrR', 'JMDbySxPoSdUjvmPQ', 'YeXNfTKpCWTVvZmDCwMJtXocKSxxYiaawQlqZLAQxQiasgyfkinbXSfIAzCrVEozntPDFeCzluyMpCHclNAUAHnHigrrJeaizDW', 'omstMHuiSpXZdNGRAlUdkZheaduJHBQqksspbamkKTpOVOTsTDuljWIlyXqrteHNOzeeZnvWRn', 'B', 'OAKaLXuHCPYPlCZgIMIyhovjESLKISQmQeHcKpzzk', 'GyMogMuoncTGFNxOxIkBevKggoCBaVOjgRqjaWJGugMfKslbiwwQYNiYSapgUXTeDxDEOlvyVNxhOHbDLzlRYxxMtyTlhQpzoBaHfegDvWxhwaNPoXjCaJ', 'G', 'IbIJFbEGLTbheRpXVtgNOMgbRwprOAIzApKvbLsGhPg', 'bGxCCRoAbCFDNNLXHUqZvWIMaKXDxeJzBsrzYKVRiIcsmHJVJAPtBaHKWHPVvuwuvELClYRfpGnWgZgmrYsuUJYsbXZdIZfXIkycTZcGgaoMt', 'ctnwWVfICInXPqskbpVmJPKcaNphbIBuiXQRFlUAKxsdlohGJOVnYFUsxnLGJZETtdCyvkECSDiVBdfLlxPApklIeyaHxxjyRzrDutqjA', 'MTOQPFqWHMLDVAGFGxIUgToFHSMHKRABOspECqbEoxjRVBScjfIkZODxaHcdXytVBnHXljWIKJw', 'zbaTrChBpwqllgbfLbcDxqcKTcYyxogBAhQqj', 'vVXmnQBfmPoylPUyfcpleNwESLgJCXTlClTDjcaQhNeEOEm', 'FafIzFZJdLhoPBljnL', 'JJAtZDVQWOSlWJhXpqcDATtVhjFnBxOjDKqiUs', 'xlbXiPCCDcsZcJSxcIftCqadUPkVHsQBtZrIFftvvYOGGWhIRiiDWjCxjTbrTDPbGpEIKmJGQbKfsKSOAoDHMZgaqpoSMlISPtHulbF', 'stY', 'DjrClhSulbunLSkxCPNyondjtFCvpQKTjuHlkTFPZAcfBAZeGUfddJfkcQQcDEQrdREwPIfOusYNVD', 'YPDotgJJnfxMROVnSpwNDOIMAXKHTwYmZgwkaYCGdKUTILMOyopOHICbvEYZeqSKqMwXNtUdIjzThx', 'ZPwxVEHSjdzXLDipHmwncNfAeVNFMlNvQfEwNpEsaVvJkRmQbxzgwWVrVeatEXpSwdlXpivyzuygFMWHfhuehDvkcSpNoDlkAbSWTTUqXxXyLCrjQziLo', 'lxHEfAvWRhjKnJuLJGgrfQGPvlPnuGmlVJWDtymILeldWQqFbibLBGI', 'JsJrqevKhTTanNGsQBTDKkJZIpUzXgmdfGeADqVjHbVspgGyjDokWhlVGJwgJmgYlxZrGxpDaNfakaMTOqjCmgGCemKtHutKiTbWgeqUymAxGMGBzvHgxM', 'WovxYUBZfoOLYbSdwZeHYGzHNZaMCeyGzNaHM', 'ozGZBxdaBzfFeWHddqXvEvLAMYuNonPKWCiuXVNKkJPKZdSKfxNAUvbOIsjfoyGnWabvdNrhuMdfnkxyCFiXsEE', 'RGUYPWyzVHBZg', 'CZAKaCXoZEqOGLFItkBjanLpDgrAjMpONHVpsQtZHjYdtjHJhGmFcUXjDdTjTkZEmFaOykonOpAmnbQQFOgZHOJiWpsOZSyIGhKeUmmQbhvznwGRa', 'suOhHQzdr', 'Kji', 'AcKChJmRWYFlXcQCfslRYmomYUgpA', 'UYvvDfgBKNQXQHOVRPuBLmutobdViccLDyvmxmbncDkHdRycqSbPkDVhrqekwlZsFRJuwdAPlxwlWwmmFw', 'fSZ', 'DynloLnirneGUrLKRkIOqR', 'ZKDgvZbomCrcAwUFlYTMUfEjcglSnIMnOkiWmDBllchCAeOvo', 'ZZCjxiMQUWlFhNtjugxAzPllJKdqCCZYLOgXFmZ', 'TstZNIRERmmftLeBheePLvPpG', 'pNqOUEgORfsPBTjPjdlyneVVgNYDCqMYtdqBnGgjymuFusq', 
'QoNklxVLqhLYWvQdcAblExQIdDQPxJixCueelyCfdMqSAR', 'zBvNzDHrhgnKFXOHNVZafrxunZpQKTsgyINEnpTuWJVirdlhGtcovGWQMzDkKbsoKzjcZjH', 'HU', 'nAmVGlBSOxTHJl', 'EtpcrxeVytdjlrwvdzSjtgsekTzYLoaMYIcSkstFZQvTqufOItdOXniqXFPJYRjBYFzBNpdfdwclnOSGRyEFfCbfczSucubqzkmOBsuhSGQiXJe', 'FgLEiGuEfUjTldeAThvpQrcxEdVdUMnzuRZaDfhHyDrpKalfARKIrxnVHdLAhaDCFChVdsKFgxrwqrnezoeupqWmkSNUnWOFvmjUDHCfTLFhmCM', 'IPPCNzheEHePdMSMosKSIBgcJDLpDCBcyAJjvDXYrvhZGAFvdRmQUHdUuJWBqKAqSZSSCTtahYfgwSzrxUsFmPLafEJiSzUNbWcMwVJYIhrLCyx', 'COlEduqarumzkxhMWWXCXkQSZMaUEvXgNVAbuiXbTWbcrhsCOjOyexYFDluiejnQaHGqny', 'FtLRdlwTIMwbOFphynXpKwONZnyKqsfIhXvuBTHlqrdSGEkxZbvYRUZjLZoVATrFtNSRbbptNduSPvhyUvgbxIdCtLgyUEgamdgwflT', 'IQoHJHxnUWIaGyphAoQYcTRVPvpMQGkWVGRABvkXXJKubEomTaBSlTbUwkXTXFLQhXkRhpsAQMzpvKZqxbyYaFbVpRqpfHgnNIIhS', 'uPAxMgjgbQSlQUJzUIIrGVqHsCmnPwmlGvrTvnYFiwzSUYAOavQkFEteHCGdfbIlxJxXgLsSgfDFOWAoEJITxyNrPPCxrruQJ', 'KSFXcstVsLFqyXSGRXUfOxXyhPvdjSeyxadwjEubLBIhjtMJPdtJyBVmndKKyRbgEVjASjranWiiExZdwTpcfcf', 'CLZbfnxzADoREEtkdnFVzGEVpsPFRizFbvhOlWllPpsOorqulxhPhnlKNQrDE', 'rRrFWSKaJpirnCqWkmmtfEwESzqSNRV', 'hyYQzOoRWVWIfZCYnHJSKXgexKUcVjscmEMHYtJgIMFrFKqBtJmYjcoAjdURrZTllHDTizAByxPvSgDgBYUmZIxgQAFqvNr', 'MucAVXAmnGhbALJliRzMfDJLqXnZTWmmuoLEJfBAHRgiOzPbfgaZpswodbbVwCISIaWiV', 'BqRTisjQyYRcETDLPvZepZRfdGcCtymEjnriWBlBUdAOXpvyLuDOnJAVRwfOZcDRjypXGrEevzbwYaUSQna', 'cwaMcJMXsuISjqrPmdKEvxCoHRIERnVbTbXsVuUHxTtNKNCJNQjzHDDwjdDLAcXtwmFAMbUVSBPSYYFmuNOSOwqpPvxxfGlZBeTKlZ', 'GvxwBzFomZxZHpzMgysaMOkQEYQaFghSKGRKHnfDPlsqeKaupBF', 'NBLolvOyBiHyzQrIofmmxVWdcpzHArqyt', 'JnoABokpRlEDByaqKVJazmHhowITjmDZPZticGxMmIPQHOdHDDnkuBwDeLtuYagerDTJiU', 'lnutvdyUDFRoWQGAlZcQGUURqDXtbxUBVmfKXNtrxKjKQtmQGsIbDaazOhYbahSTGxYliigtxLkMDdhoUxZ', 'ZoigRwioyedISSUygccPWdQoVvdfiIfMnSxWtdmpYrBTm', 'bhhAXAhddmYmginjDVcfmiQUFgZwmvlRTEvpjzPOsGjNFYHPmvAWycabnEMvUdxwJBSiqtQQbDvKWeOzBpfMYQpAmqWycdbcUPkprmiux', 'yhrVCBTfRjoDTIIVspQgExXpZtc', 'pXUFAzjUNKdywdjEzmgCURRhkBjgSdtQUnVtRkkgeBE', 'mekZJWEGnTrLMcYaRZifRNaFkwJkQXgqanQUzgb', 'dgdogGWMyXwUOBDUwRhdDdicFUIDjHwRZYyiXyDBoxPtkfojZEzaaAkumJzxpSlgJc', 'HNMDFWfnjVJkjzDfeKqBplaKFRdrYZvngfHsGNTyFlRlZNiKfAsBBgwpcUhczhEYFYOKvSCWnzjmOpiXdIKhBY', 'pHswGuWxgCAnIINwUbkoRqIOCrMoXzAnEOJmlClkaPvKZbLWEUZORAXYHcbzeSRklWRbFJLNVcSImiFuyJARxuvdBDOKYxcUdkMOpqHlioJrg', 'ZMYvsjxpFnLNUFfWvAQBhhbWZGCgCeHdYhuUoYZyJvSdqbwdLFbpqVsZhqSuzLnKwyosEekGaofeWyTTGTddajUdePIecexXKAAFyqEeaXBlkIOTQvnST', 'aOY', 'FxAEKBIlaESdYgTWid', 'sLyIPhtuoUT', 'iTHgPNdOPgDiyyTosqxWUoZSJufiTCiziNmryfMCcHMDXrhidFBFJFFhNyKdycYDZiUisqT', 'GuMislyfBqvCvi', 'cTAxOWJBNHXNvvHOFjhKAcdSjJzazsXqHagwVHkFhwIsieQKIN', 'CIVJDrSJyvrypyhaIimQAuGAJNqRUjulYKEyHScuZufEYPJQfHzwTcbIUxfERLdxPJwyvZBvyAgyMvaVfHBqzWThueqKxgvvOCSLxqAQiyxqCYxuGpphie', 'XCGjjRYpsGIjSzrREpvXJQShouJFVidzBKbYKGPLoFpmlHPjvyVfkXUEtODnneKWiXmTPWxTypyQG', 'qAZSSiJxeDYcjtYwVOknCBYdPSKELyNFxmoZeFULoBYOZHqxJtLqecfEtcsGR', 'RtLLATSUUGOdVwlFPhCINRMpTTreEIPWjuXcKChKtBJIQbhpwhwNdiqfHORWLkfJDjPlsT', 'FFHgBgoMx', 'JCJSOuOvCbqMDEFwrnAAZmhyfwSLITFvdzwcNJhOTmCUrVqRHwgqDkWxwybKcAjxgjztCTHoNJFVnkFbvIx', 'HuidBOKDRtrajKcZbCOKtQdKPIveDaoKxGYHrTTRTbTRWIqRDeZbYxSxRO', 'YErkNPZTSGXARlzGvpnzrDDshEBPFwANYaCapXqTtakWJNwgZQyYIjjvLVSXLZkLQbrilQcbfUlFGpTUYbXElsikxdrfBhmbjimDOOvtqEuDzhErntXkEfo', 'cIpLiHRTArANbWWLFyYNXcDeTcn', 'ea', 'acoAxcVJmirkVJMfFJhIEcUbSnWaJbaUEgRsPnQSQWxBLuVva', 'eLoitqmSPmuHOLUqgGxkcwXAIBULKLAwjmDrmyRomPAYEVlcfhqWlZQZgfqhgtXeYinmeEftNeXWmJtljYlvaFhLD', 'FXjryefkaa', 'ZNvzGXDxrnpuHuenTGQcPzCdtKAAOuTXocjhFIvCGkQdxZmVLyzFnPVZXVqYGTQTeQb', 'aUgITDgBPYkjgAsFtKzDfeb', 
'ruiYYvRwSqurjXofFMUeRnEHphSCUaFPIRvWfEyHjiUzUzhSrYGkTpmsezVETtihOnhRpQhpPVSErQnwOIvqWgsnMQuTvQyamxLZKZJCuyAmPrqSeypkqeg', 'jdXDPfCbuYoihETnuRxQefmQvrUkoTSfmgSHywkkLaCXSjrLxQDYaOvydfhwtclXiFBlMhhruZesdoojcXH', 'ciuZpICrtwwNGkEvnFEyESkJB', 'xtvtuVQeqxsnDhoRzDdPfboJPWWHNyskeeJmbbYDnTgOkOkwWKXCrKcaTwknPBljKXwCmkURUIf', 'doxEhjNKSqiFManNAaLhkjEhuABBgMZytASCiIQAoPRyseNWHoisj', 'xTiJWmPSwCSJXDkgqaMzXWPJorsZuULJfdTYlCkXSzYpDBhmvAuOTGUcvsMiuRbzlIvYynPCbVbfvxhVelYgAqHObmSfJQBKM', 'fQYpqtvdJZCITMKAWBfbSaAsOhsiCKocNFaKFyQDUGXLIoiyXnvgApE', 'IwCftyepTsAtqleiZkIvcDPNIbjQQgxGkaawAOPCoHuGPWrbYmURKbDjYUkioktEnqsuqNszivAQOnaVLaxBWcIaPKrosIBQcyrwkUALpvAKDUwNLv', 'McuHiRWigCtsRXjuMlHAlxyPtjQwpwkTePsnbolRhSXZEsEtfhcOFRn', 'RrTOOYFVmGRkkeYLyggjCOKmOEqGSoBWOQcMxmHQfHBXbCfPtaCURvp', 'uSjkBlHINYAtdxqomIF', 'CTKzCBuzlrzZfVzfMKWRqCfSIHbiILOzNcQNSUocRlqanKgxADngIEquHCtyCfpkdSwDVtDiWLzLQeROmpUEqWrlLGiAkbYpdZu', 'mxoCjDNkuPHcqAuSkPVNfoJqjw', 'YOGLbDBtJsnVXbe', 'dqmIOALAZTXajapLuKGFlqmBKQZdBLtnqPOMFyHjcaPTkDCrLEvZjOnontaZBzHONljDQNXkKsPVdgxHdhytDopVIX', 'qzbkDNDGXYjrQQukCqGzxIibEcICGCCuoVgYcGthKIqIAiUAowhFtttIkcMDki', 'JRingIZwLGhTvpIQvTFQGJxpVeqDCNAGOlWjOdiBLjiICghcmDfKnswtcUcRFtcRhTYjswiZDdVCbNgqVIjSlPEfDsXRclvWhKqbOqC', 'xvKvghUzxtsjmsqjNCWkRzSeNNKFxlvbBiaszBAksnquIvYNGOwGDsOtK', 'TKijir', 'AjBrBnIKzs', 'dEfaYgDtGOqLzXifTWHQmlHQqkgTKPVqDVWLPzpbYbJCJFDMfCdCPGaBhmHebQSAcFnJhG', 'nSaOyzxqKVwVsEvtVYHZWAACUKXWwvBFmGUsN', 'J', 'ZavDrlCvwRdUzWBzwzxmUaOTxpcPNPRLcAuzeGbuWRPJWvIhSxIhoclxDkIPlwKiENseKBjfLfGKoAfXLIEtsNeIdHOpdGBUBgdNKnC', 'KR', 'gWFpQBqAXFwVLOmPXlFohxakcqGOxsvzeswrzUJVbv', 'twWqMaDEhAMPWJGIeAyiZSkBvUqVcSdWDvznFGgJaejKxdDkFxrODGXsJjAcPuMTKzFTHXUNECwuQVeMZuJzOaflrLkOlDX', 'iLkyhbKIcDTHCwVIryHLUXpXxwIEtVjfzOdSknKBwERIawHZzChEZvMYFVqeVWvsxMFwLwinsZCTHeiEWvQLLCFQSVTSM', 'QwAFhAUIyyuUeSoOkBkOAPvnvwqCKbLzgTrMYTTSKKpRtxjoPzRpZhsxTlpiVzNEKUyaSPJBoldnbSyodAEDmtnLxiJELvHiFqyprqaZVOABivvLlJBkqd', 'gcoJYJIJyDswVnvGWVNyemozXANTGsbCTtuYZFocwXpkHlfDdF', 'HSUtNEMleHcKsuwNDdKNgfBnYMrUEXtOkGMKfjZOGPcxkXMUxuqvaEkiJIuwBxgoWthZYdVIiziKLXHihAvYtE', 'emPjukrqBCIKcxjkNlvBcwQsMlPETO', 'CcEguUusZdlVOLyUqIEdSFgaDVuzrrNIUmOdJZznyAblGWhcoSwWbrHJroZSwCSFqUdjYiL', 'dRmioBxbLIXKwCkPAolEPaGeKbIereamSPxerLXMJvWaFkBgLFWLnnZLSpKJGcZzxhRxDaKLeeGIGbdIlEVhRLIOQZoEDGJUxYRrq', 'ddhzynZEyhQttClztPNEwzsJSnQgUjZQLnRgcCgalTCwOoWsroDAcZiKkgisXkuYyqazbbZGPDVkKAU', 'cBCTiqeWwWGdGFiovcdvvtPXbgQdkufAiYDDsVDnYaOpqigOCrShBNhPhBAzhPOrfrRtMZY', 'UeVaSYbqIaOsgWejymIpIIVkRdjxlmoXcxHLJITnvAoVSVGACBfJNFy', 'fXWqMoCIPQMJObYLi', 'vxLiJoQgZsqdzyqaiwMufqgWiJIyEPWsLjAsobRNWPdrrxLoHumKBuItIXzsyyllokbNIFUkIlwSprZBsdzxsiPWkHeDwajgg', 'RzDkGYpujqWIoCBqgwboBzpFkUFLIuErWfvbuZJSPSzNAIyRIOvCRWASgGwehH', 'dSgZEsxoMszqJkuAYzxidrwAGwpPoJugFvweKECDUjGVPLwEbddCAHxNXfNWhDGbesHLxRqJScxJVjkGxYe', 'rMEGpksVpXyukxqCZXTdwxFBAvXvvJAzvFyJZFCUvFFfRXeXTUkOqtRfEMHQYV', 'qdnwviwBrGECzHyXazTYaLBXkIZYe', 'ZkMiQwZFqQGniJAmlMcTDxFbTnXfYvzyHcoaOAaQtkdGZlVlchObTDdtLUOGbAhHOBTfn', 'TizZUbujZMPPimfIvMYBwQcIefhCpqcTCTcDPDVRfhKuMpstrPACoHJJAOKrrXOrVrnfngwJChZbndiIQbargawpOYaLdlbRSyAMytfXG', 'DpWvuaCAJGfyFiwsk', 'nuIfihCULXPJmwXsrNyKNnHHddchhsFCUKHPnMtwLbwhBkvnPqmeBsIKzqbRAktJkNVRTgojvJBzLLSVWHeYRZrTMMqrzgzokBfSRfwfzDFXBjD', 'yDeNaImbWSAxfEpIreFGzgDBhnURImHDDFJWwFSjdvtwloomQkQhOnKyBGoRzCZtkuGlEIhDmXFSKIydWTxjr', 'FlGMENmISDirewcTxcieCzGyNWUnIedUzMNGpmgaviVzmJjzQKQzaGVbhZXes', 'ftGMmNNFgLBJnRzCxZhCgLzVDANdowcwWQ', 'GwiWaCCDwrDldPBYJvsuqxZsUHffxbpFCTLIeqpeoMgAvPjKbMfFsLZkfsdzHoVxOPgbGcVxWjSSvYYFyEFKnV', 'UtmTUYGQoYWwWSyRrcDYisxXwpjBUQkIl', 'QjYeEAAIGHTwx', 
'OTRGeRXgPoJuirVxNZdSFqaIcOsoFRamRUzBdnskOSfAAIgqbZnDWolAHOIMsRpfmfhEtxCKcFoubr', 'DJFQHUwzzNRXUTOMTHtSdzemkCejzEwbqYtnlhjJoDynkPdrXfnBPZYkBaAWqzSrluEtiwhOWfJQOudJgQSSUmUjFxKORvrEWLAdosTLEqWeWaO', 'LanxNCaLJqqcHxCoUKCskYCElzMXQNdpdDDHOgrIugsoRZVUsLWIdOYyHbcvHYusMEmrjOVuAVqsfOiOhNwzib', 'ysnGuKvTjPIWlRkZxnpmbwirfoQqEOkAyHMiIbddtLUjaoKWSxyvQmHWIvOTtHuITtGqnHn', 'iXXwzHsheiwqddbjvLzcDtqoQONQuutmSxDQDyKTxgvHzXlKGSmySvtQxssQKJDgkvzksjrFSMpXSYkiqgfJZPLFt', 'mQLJgFKVZxeIBGNiwwRTKwXHfNpFziKfNXaHwGVQgIiTsDjgaHTZHPswwZbxRGMSBSsKiGYAcQMZEIpJYztUfWoxmJiplpHSEixlAaiIeJzTsSTiwBOhuK', 'iTJBEMAZygFwmtpkhOauEyZoTkMuhiQHBTAkuQXMJsQcJyKDpXpcRdYlERZygthVLElbxwfWKjBgIuWJJtjbxNFLhKvmigJ', 'ZkECuuGhCTPbILDWdLiCRRQBAmZZfnSbpJlEetukuMVGuAHkFMjklJsezVTIps', 'qKNkIPlPKqjYZWfUwScnQgqauwBkXAqKLaYLDMjgiEyoyXAEuvhILvMqIbShjahnXFeermVzZnqpweyBKzCSUpXKAlcnpELrkIwIWBWCUb', 'hHyqaLoBGWFtEuXFcpLKOozSbeBLTivzbdmBSAvOaiNfnKyODMrlYcLlEqSDQigvHhoUnVSvyXKJWfdgE', 'dsfKvrNqogoGtsXNWpylRicbAyg', 'BbTwPAtZjiqjGgdbZAlfKYmgnsipwjIewvOadqIvpydUhdOCCpcmszHbFkALuVkGmIOJhB', 'nnyRXyCExbQMzTrphZtzBuOmesozVWbTANWxmLiXzVXQUYTCWhtkdBvtZiopkqIwdYsgpBvuSvisgjAqWTsvQkyaE', 'tDcDDNedyCQumBEWkigQbMdZAfytSK', 'yHtjKffkQqbxZSEWIaUPLzJeHJgJlkWGkkKnwKJGRfsAyJTwFIohsYTlQOarDTNVraKzCzjNbRlNxxOUnPuZBXEQNnkwKXJQBlIWqkEOGXFcAnveXRZHhmi', 'MItYKfpXGLafERquUtOlrbruMOIaRRCUxykAJWmIFzzMNbiLzBbuskYIUNVrGaVvs', 'RNXYMGgjjjanEQEOOUMHTfEyReAwVTbIyGsmYIvdZDfmhbNxzhFHluJEHrxhoujHuAUWwelSVljvuYjGU', 'kiMuqlkVnIgDGdtsCdlXnSDvCiwhdAzdTLythmNZnBijdLJAehTPUjNseIqRrcymwgPleaLhGqsQlLjFxnXPlHapJaRYbBwfYeGxQWSFjhKDS', 'mhbJHnLQaytlazqMHgxddPrcJQGqSEyErVVoPXrRNBPkGHhQORIjkcItCB', 'cxRyooUcKABnHZeOhkgiTbuwDDMrQMGYLMxmdjQIHTTSfiBNdaMeMGzyjJGuRvhxLpygeGKCDNHUMRRceujqsqiPm', 'RkRCWlMcPZPdiEKfWRhATLzYsqoPwq', 'eMvwUGTIMFtyqWLEKYJaGSkbKGThAZhXWhLbSNmhXonxkOQUkbJCiziwNAeHSK', 'LBhsZuXcFFBlZRinktfmrwBxbZwsOpKDJmZfTjicFSHRdAW', 'XoAbFKRldjDDbrDGgPhfgvmmyBoVwYkSatNDUXjGGfgyxymrcQdUlPPPUxVQIvDkQqTxoPCXjSalN', 'hPIYGNMeYRgxdhQcrzNLBJibMUzaja', 'tuYgpbKRdyUYbCxsbAsvnaDPCXNygTVsbHdesVpiPWpUSUhiByHojeMOAOhwecVGPkrqUBRKFYLuRiZhBzjjQzUzSNJShickLWfAVQwxOuPjaHEWetMHBj', 'vBNvmJudxTSqvIefQbMxWpQgMjpanZuFOzDdOCbXYwoOURvysLpNwRaimqsnPtiJz', 'GNuSkVhZBwePjXknUinWjmswxduBlhzysMkqEplvCQTyXEKNsTYAlXvtwlkXkvSplXYrflwDSvouyWHsuKxjaUjDJc', 'zUzXPOdbFGaUSNJiYSPUUyMJBNfNpFroHKBnmexAFdiECPwNoconEM', 'mBRAAzpbPqEdcJBoXkvqsaAGkUstfwmKjTy', 'lCoTqqudDcKxcBjGCJKjyNDdekYpXZIAnMrDCcrIgCrfGydXTiSEkwSRmXCtbglAnQTJZ', 'idIJeYtXySOTImLaEJMxSTzgqHHeeYQEBW', 'PrNZpxjBjiPZviZhNKeTlvgzPpgnSwJxMOJnPqRCWIICyaiaWhoxoyJpF', 'EzWQelijavgBxtmuTiDWCMpKgxzmorpuvVQsnAEVnsmblPaBfbwGzXyKzjltzTPFtQEeVevhZMfxKHbYhXMxTQieYoBfqSH', 'saeWeBPOIUJDZXSsEjPobUKPgNqbRtWQdGbBVpqKqciGeqELMPA', 'AXyFGJpMgazyLLlominPXakWiAbVLQxFYENmCLSrauGBDEQjoaRubwytJqbmNtWFXIDKcbjCWrdkONjgInZHeMaCqaWXk', 'wvQghC', 'NQsYjPNFpHfoCgVHmiJfnRHuDDUoNFmaRhwHhaMJKgTYNItAdaaeVNukAsSmNwQqXTJptqHKDaLBDYayYgmCJRGxAof', 'zsNmljfzcSOLoyFhZaTrfdQyfKhYeplgtssCmzlBYPbFYdMeXxRzIRTXaUWsfABWlobZZpwPreWQIpVWUhPWKBHun', 'mwDYctioEzmmJorcsbFPabmStnpSYbSMpfgTOXacGaqGuAjxtSRPNYJiaqeetTAHOYHCKqJYmdMcPzmNWOHnihYQqyrlOwF', 'NNqjVY', 'qtwXVOWbVAPfTJmTyGezyiBVkfOxoNdMLXMgMoxyWsbPbNEwWPwbJyDeUUrUxcNsVOTkduabPjnbNimxOjFguagrLFjUZnImy', 'BKBklKvhmsdmFNdllvCglhxhXfZojytgg', 'VJykXP', 'zWSZBnAfL', 'KgcAjtLKlefeSvLsLKGbTbNDutKKBKJkQprslfQyURYXTfahOhUBXlaYWwwBeXOfLibetKQrVTSzXUemeDAgRQRUV', 'oehpiQlsgEcyGKNkHWWvEEKOtQHHDKmQYCkiQZXfFCpSPEQjLzxbsnyzoNDkcMJfCrjMf', 'vXRcfnwLFOPnRjUHRnIcrYoctZuOEwNIdJISlicmJfVObNQJVz', 'gEkdAwCPpZJuTnLdCyZXXFNoMwiNXpTUbsTjrwLvgrDRhENEJIlFXcHAoJBdipNMkcJCeL', 
'vLlTAPzmHOWCcCtecSNJCUIzzIsNqOsaYRsUKNtyDqWlmgbCWZRGczSfCDDwqKkLvriVdSBqYZYsXwfXQtnyrAQNomSosSEuhxmbugPcfCblIPvnWofXvP', 'qzgdyyGJfRrHoGmRPxtWmGdssjwbcYUZQKzzBAwIjliuPrJDfQRNjxmaihyvmmAXasFlQEKtRbuwEPBedcWxbcAaEKmizILmSXdaykDPoDfVDCjWLNicO', 'KLTfUbfYhLVHkPHRjiuCOzRobIFPgsfjwutDBWvsr', 'ZZfJiDHbXGyIrYU', 'OVXybILVnJpwlFaeyvtFIouBlkgzlRynDnoSOVnOCKYmPlWTKtzvDJadhbhNwvWoPmEwGWEIPdEKh', 'ohUMgnVWBZnrtSSntfLhWaKuophzoMcAmcxlpZVPvXiDuvQZetNOrZbxeutgEvNRKxGohVMdJthNDksTmEJlicwJjgSWNgtlKyRkqbdFVAMXMRYpmGj', 'ETBlImnfMqIaTOMQfBLXeJAUczSqZewqzferlEUNrlNGavzgILTdiSWCwHIvEJejcimZlvakxBtBxhvbRdQzVsoFoFWSunPGXHkJk', 'cIrvTtfjdKHeKZaGvoQZsJTcCdFnmKFmMSUHGvVoHaCEbixeDFoPamAVdFKkiujVjBvIdMhmm', 'mOJqTupgoiEtpxDPPPcLpGhPLziidRIbtziGefnDungzQdyvhJsZtXFRqUwtNphsngOGJrYoiFWrqwBbJiwRkzVQXkoEXenZprDSeqbJC', 'KKjpUGlfDzFEZLSjKMeJYOPJCnPJENJnGdFdFEtyZnoZltXLawizglppDq', 'JtsRxLTwcXCRCtFYTZjrqBgCNftUQuDjsdUizvirGKhISdbTHrMJGguDboOKPRTKlP', 'JtbMYdnCbkSZLJiKyupVvvjkIL', 'osKxUQuKFWMFLwRpHLeqczIKkcCmOyJbMvtbDHKtOkUmYRK', 'iqmamCdwjBHiWzVmYCTJayqYuNivmTyMCDbsObpURWhiqdLzfnauZlilXVNzjYQowhGqWFYtvpT', 'xRETpXyqZgTLSpGofgXpXCKXxPX', 'LYsYfuVUbRnXpGV', 'icneNzgpWutdX', 'ipUlTyQqZFUcDUEohPXRIvboKXT', 'kbwGyZoFRchNaCOSwjYHSKA', 'xVWaRZKtSDjKrTPVObXjLZaIhTzJltGqxumpaVNDsBAbVYcuhjgQjzUMSbynCFxTavZsVMNmsRoWKWYoiXjrpGeFBoLiioQHAPxZJIzbHRnlvrYRL', 'mGBuAdfjgBtTjHECzWbQAXZxxRKyUppevzqzITYYcgJzOBSsFntyhVIaKKWYBIToFIwNVDDwuYaIfZurfIMuSQctIKRgROXhvDnaeWzitzImlVoUGRCxE', 'yBJFGPqSfsntCZCnhzJWmippznIoCC', 'ccaCsdNMwKDuSkNRqznrTbFgQjYAAPGFsUAuuAvPFzPYEARHujAFsu', 'aKb', 'IStlerNFVjrpsxndAcMbiSvuicWeUXgqEQBxIFNMFiDgco', 'mDfWJf', 'qeYNOWLzrUIuaCdxyajmcqjCROHmFUoqSAlrqWvtIWVPiljLNcTRCXmcFEzPyplTMIEzuAJCdcgRcrVODFdLWJmIgUVBFSnXiVlChO', 'YMNUoLQEgqEnmWfCDqeOmRyqnBCuwxUNDmNnOcpHb', 'kKWndUUxMHplXyldjcJrfSiveAcPJifcTppbplHLZkWZwfrQHXPiAEyNeL', 'tyouZVBtup', 'YJybQPbXcnaAegiyrVlFM', 'mdNSIlAFrhZ', 'xlPhlGfJHgZmSbRiXFXaDIreAOGVSeWyTlJDnlHWDusMrkFzQFJvwnxugBAWnpPTCPqnNblxVG', 'aRPkaKAyLEdpaxpABEhMKMHOVPhMfMyEfugjnmkOksKkrMwgTWvAXCMSzYmOHVUeeAHTorhCuX', 'XfqGcOmmIVhaQjziNcTSIaHHnSqWcuiMDuCPuNxgKTzniMuLcpuPbdfrsGNSC', 'ZdSAwQBaZwSfRijCtIcpolpcAFDYhHpAcaATTXISZPbgbUeWCqLeSOYCUiiUqqFmhjOImkCLQaqxXamYLWaWwMKeB', 'KbytKvUcMnRtjYdYDilDPJXSLpzMBgHcSROCetICeydFHLSoTupjWMNlCOXVV', 'DwPbswfdgulMcFGUxzFXnbsgOsNrgDKoeJKxXcWTihtfaWyASRWBUOlGDUrhLJhOQwOamvrDmQhdhWqZpMjVQNRTYtAEmwF', 'QirTrWuxKceDnKpQZcTQUTYtBTErRMWLqOPeUKBrkQtjsaatNaSBhALuLTGtGteWOInKPIBYi', 'CYiwcaDowgMCczqyzKDiSYQsc', 'wzxrIFxHHrAbySUARMcSciDAMsnjzXtHkXIVCpOeWUzdXGtgOjStZbhIhQuUhddZuEcwHKNPKAAeLfDIJLKzQf', 'WABYTLjiZkLaQRYNKobjxqAOvIjXzCSIKiIbSOFeVDXCXdqfZIYsgVuuhmVvgDMEPcdSPfXuER', 'HxhCVaU', 'mosoZrgQJHNpHulMmJIhRuwBSEcpvZhQhhwncucupGrbEhokGNIMHMORyBAPszQlyIeBxEyJsBMNSBGVpphvvzRgxTGfqzNsWzkpACd', 'UMoGotvdzDZbHluBtsdJGLukvdOasnxcRAcPzRbDnKXQQWDGgjeriSrtGeVSOhmQmzErqTExKptUmPDSpiwigCsqtDuQWAb', 'sIuDKdrPYkzbUIZWXdAbaxmTeRFDfosNsUCRoP', 'TgYsuGTIwWdQtMDKEeSFKjEyWNEwCtchvjYyoVxhyzgFiiXLVIyJlbcZndk', 'jULCRUBFGmjpGDCdVvQZFQATSQhOHKcFCRuRGZqYRHwcMFlNkWpoDVphtTaLmp', 'BTtQhlYwjATLlgLkkHbQexgDYfEqWRJAMLEWJ', 'DXLuEvOdDENAIIZJppOvttEwWJWeQPddxZnsThDdpnacWZJAjXjSoZskLRUrxtttTRuCdsJfsoqEULKLMgOzMLPIxVmDCLdavKFhfLWzLvkhkhQHO', 'Qz', 'oUsvsXOjqMOyukOhdevANlYlYPaIFzMTzPiWAZQViZRnOiIgPGFvHOFnjb', 'hfiyqzHKaeEdGxmvxKbWbXemKLuKgddOlBbvZyFTEoKAVGOyJRKIQPGQJadaPyTPhKU', 'XbqxOumMNQQYtfsdYFSjNfDTxMLsCzAJLVlwuGUyPwpuBO', 'cnOMKCxMiljhCUCqoycWrBeOStaPSTRCIpLtUODXzhMyHIJNWNzRTPeybjbAFclYqZcjLBwgnYraEv', 'EQLNlsDPmoZNQeoczniXbWLuzgIxnZhLhFkhNmWmCdkhXlSmnwuojaeiFyGLBraoilV', 'JLyKBRS', 'TgLPdbRGlUsxf', 
'bTKOEIxpgxzJpTJYGrqGEytPztadFusoOfDCreVRCjuvZWbsvJSeaYIffqSodbwRdiAlfaAhThoCw', 'walRWxmZezaSDVmHzLtzVqbYO', 'GZwbcbrFXVPMsXiTWiSaIJaWJOsYAGdfmzXbTWBZNICKVohapzqoybqobzcvRxtibCRNDUukBzkPplOEromiDkHuiRGmoDbTaUjgpKsgweUvKO', 'ZnaLgyKUKFPNqyGRaQlMnr', 'IPhXlLaHGhCrY', 'nKUdgqPjCwqpbskQDhXFGBqWkk', 'RMFuYGCVOnMPmspisRTnnZQRddSvdEIZZhhjuIhmAqveWtXLJTyqqWOriWYfwsTiz', 'UvxqDbdJOsJXnbkoWXzBXLHBqMULPXHPnxVgDhGvMwOuYmkxSaGnJwthCUVwzbeOMfwWbMszkabADdsYcHaXOMkGZitNfDLpMag', 'UoZgGwgwnOisryGmRVBAxPLugCervdcDFbasgomiXzwwxGWZDwhMVRUEeJEemsKZhlIpbMJiCCqIMnYiVZEqx', 'WJzjqcPtjlOtEDoJnsbVF', 'WkXBnvvjukvFkHRKZjRMBXasJUwNTIEpTFDMvytWzHsBlIfyxbicedbeeIzHXwBSneZBcLVSpEcsDIyiQbnUiHeHwcbXCBatAIbUaHWjsn', 'KNvFZPFPwEduPXKcPoCblaGLqqJbvQpDbBpneILXENZdFICBW', 'oCgCPPPuIYbNnRnXlGwZPfuPrvaAPQKCLDTipzyRSQGxKqIcOhWFulm', 'CFBAILaQtXHMp', 'ueJLMvBtJh', 'wDrNbWCDqcjtUZUZLavIAlaabDFdqofgtaNrPrXDMbchJrqDRvahbDnmpsyePluxiWUBAxxRbflSNJ', 'NLmtZqQYgurVFtmIiOAWenUXtzmUcqYoOchMTDlHkcgZUnQMrBMVjiQgMNZgtXopmdgHCmqBdnktRDM', 'TjgyWacOANojMzGIjislhJIbxKJVuLCHNppVqcwKeztqHBZfQlhubBvKlmRmuDDxvK', 'HipfvkeYfItwCfYjDkMfeGneNEgglyAmHHWfbvQcHVlkdBkpxFuSbVVSBPlxkt', 'HMpDvOjHSMFbPkNGetSPsKwzocnfIihruNyhBSLyysPpHOnIWNhEnClxIkFBOHSTZJSVQ', 'lFPivDFhohdjcukaOnaPuefQVcXxeciVBDcyBllIYS', 'YYdDsVDWedvNCXlDogaIymMjlLjQidKJWrhwVvuBPZuSsUeqDXXEdRxBDwvJXhrnAdpICHJRpkUICPANJNUXRJYdbCreUJLxSysAwmmFcWaVUPHuUHjJR', 'DvaKAiiIuXQYMAXILHFksKeqLQdJZuaLCScdQjCmQnaVgmywgKyZzgrcsXxOHgwTuUUwokZglCXoptWUhZWYWNzauLacJEwAAnIohslAVaAFtwkgiI', 'iMzmFBGqhsrvvesZlsSzobCwnqmGtNtJoUHobbCwxlJBQXdcfDrvmyJrvkymydgrpPS', 'L', 'WbmHdmTtlJRuXtldKXSCymxdvbSjDWLaqngQKsJHYiVJQrXqpRDzhIOkykNSdk', 'hbDLsQucDiqKrfyXQFzfypMrayeOiLboPERBkCGMaIBOjWPYBptSfsiMqTrsgYIarGFOokddiMsZTSpNcNlmLWxNkc', 'fFEganwCcMJnCUy', 'UESflHmiJLLOQFLSqzgprBuXDDUoeWgJTlVRbwiBpErYKHBFvlxqvXUngVPvMfufvPjrsAfobDFUIEsmWJpdzfhAtZbAbbHZhtqjfOuTSRK', 'prPrWRCoeYL', 'rqIuMzHhIcSEDYidYAFSNbzHVOswQtCSnbLGNQjYLrRZidJvZatuegXYmVXQIYDeUBizBTOEjuuSfBhWwslZXaevPMZLiQgGlwyruAuflNqgxNQLz', 'bZfljNqdqTETWehYYKhhHsIdDpkrcioUVEZqVwFKDHoYDpwfTDSmnsDGpFctUnmWKXJJmfPXuBlPAxzDmGy', 'ShiRLDJRdMBqCuSjfZZCrtmNyAcAxyzCItFRNmvPoglLGLSqswNZejuYzpyCSeONUUNvTCNtLDWxkpzekcP', 'azTZkOihhdmZkGelwkmjyRBarMzNslJLKdGPedrATszOXByDePyZrwIrCDOLOGeKaRmVTIBKOgYbws', 'vzxvZmfzXKlhgTBYMqNgWjSyPNLPOFpfqezYdstyARVWcMgXbynphdUnOb', 'zcVqbVKynS', 'KBBiOMcQeYtdhFUhNCRmeK', 'nyjCdeILiUvAapDsFtUfDH', 'KbinEsIGysWjnO', 'tMEDzmBIpvkaTEoXRZ', 'pTEUtsUcNdmzPJkSDMUfoDItAFoQRDRhBfpLLOOIZtWdwrsgnBYQuWNVhtzDbmvvCoDuUIpHYtrFpXAJMzoCSAYmnSTjuMyxKtTNRncfPNnXOaMGVALFsX', 'pL', 'sAtVDRGRFGAqJCjeyUjsjsrPZyYOvscPPpWVQnXVnUehPyhhnOnzReXWOLGwKZVkFVyuuPbfKQsZQUhiugyZKkTxCOXVPHzrMVjNkWoLmAYcZR', 'bjMyaxcvmSNHbJtfqlXzobUmWLOPgbUYeABikygYHFFkSQYbqzfVoUxhYAYdczulhZkAhvOVPqWJERjlbVDDthVPSodsCgKDoRsesoJmrKhIvCq', 'EerfFNzceBiCwZZoRUopHQWEbQ', 'BKFyuWxewZtRaykwnUnNgUlupfhpproTgTHca', 'KNOilNmHbVJouhbJAcukKMDRUtWaWcYdXDnZzRD', 'TRmVbsyxsukCsCnEfEqcJlPBqtKTXitydNABKRSYUjMudffmZGNiSNtYbAXSPumPNUiUeCRKhLtFhVVJyZGRdOJAClgASikVYvDAtaddHZkeZtDOpOVPhV', 'OiuVXrEyPgOsUXCodVZMZdvAiTKvYoHSfNZMdnXCLESDPmFMDHHZUqyAfXzNjSNhCWNHWdCuBNldfsVWGnxUnJaJD', 'bvRUVLcvOAFdyhhjxRYpXTqvpKPAflIlfrJIBwaYrZBWAPHLGeLYLWCuPACBCeHiTypQgNp', 'LzmeOMbfPLwdbjJewtqZULGRaVgRBAZDXFnvxHaAhy', 'vFMjUAoxYOerAx', 'PseBnKahJTUdqLUHQeUBSZXpalYmxe', 'DdoRqOmQIJOpVoudIVfBUEbVjZadyZOkPdATboeEVccAJPBhSMbrsDKWqwMuqRxNBToTycUHBjFzwWWPEEnmJ', 'unxenAdhbYDCvUuImSFBV', 'DFahENPoNTfnmkuBLXdpBNFZzPFgPVHLveGkmPcglDuUkJsQGCNCiKNNHKwRkLUwQmQPIVvTmTaNV', 
'upHImeXozbwIHrafaIOwFHQdKQowoZeLRcuAubizAcvnJcmYqCWkzBSHBTIdoRaFZuIGVIFGEKpBXEvIkxSxrGVqyHvwRPZIWJRidjWKcA', 'OVIiGtWSmRPgNnRdaRtwgLTFQJEEKFVTpXeGqKQgNvqCgGggyGlrUeEdOIpjeNWzUcZCbuCxCoQKNMHcjSIyFIcrWmTVfV', 'YtpWsZFQaXaYynfWAtFWIMGCdrycoGLHtqoyruZGDmSduyoVkzrHnSzROzPeQhTTzUQdqIHFtyJatLenFYrQgQpVlYpDXWqNdmK', 'iHFOsxkDjHgyenBBhBYAfqAizTDINXkfiFe', 'gQsEnCuvlTrvQxmLkMoJSJeqcBJYHsyNarOrZAAFhUSIWCmTXrDgnzzydPiSSTMAWYhtaupnefYfg', 'pHXmDlPzqnkwBmcItNcEPysjYvhqpTcvHCwWrRtJueGwWlDydK', 'YOxpuRnvkaQqPxMTL', 'TpwDNViAWKLLqgexQiNheHemUcyKmoTtIsNqbbOYvyKvpQILQlyoNwVBHkKdwHtfgNgVAwMDhmUSexYXhAg', 'napzNCVsayDvkDgnFtrmOsLObVdLwOWYOsAoxuTRELcBfvkDqHNBemWssr', 'OPTTHvqqlOxEExZTGxdfqJDHhFneNdaLUkdeBamsXACcVgXEnkhYShZYbLZnGQPzvQiHHVERmOvmGXFjyECDMsByhOmhRPQFwwsYG', 'jwYhuLSTySlILWQcLoOZSdRreUNWwEB', 'MuF', 'bAiAYdcsZamVavFOafsjMFbkYzHwWaxTjMBTvyYDywLfGUEquhBAtWDfRwFhnLBPsIwGcDpJVdhKEhxclwylgrknjTdPWAIkhqBJgeKhgjTtejHzoy', 'GsFcaaMGXmMsNprPTWUldswlcXLssYwrBkSCidFzjfOqUhvGqCpkrpEmjsZoWebwwtXQEixCeZeWeVMecWQCgNEjmDDgRLTfdL', 'AoLhkmekzMHKEtqcsAqfyXgCrkRAJRmMlzacygYHhrPQJhnEMdacBvymzdedmO', 'nNmSeyzvWRDsOuqJEENZMiaYhsSCskzpnr', 'ckwYlMUYjrGagoMasQQZDUugOsbBWwcdYLwVteBhtHcNBLbxCDGUWctILPyjChleGtCRTTnAJVVcSLTxWEzxjMjCUocsSzcNfmLMhqpEjE', 'sxHGMLeNfZqDokWgvWGsKJnCzIAiCt', 'QxpYlUMBGFJmvqKgBAXCtgVAUsiCPtcmHvr', 'TwzRiPWBQ', 'orv', 'PxzCGIavOfbFsWGTItKKrnAGpovLlImgKLzhGiLTuGCuCqSGZppBuWWrnuRFAZYDypiHKCgkShN', 'SAeIEBkSSeqzzMNkckXUzTMyOjDPlDCUrDYThHOhTC', 'geHcbwodfgLLIkTrgACVRMfkjisprYKZKcMcMbAFNAaGgzWxvQUfbMO', 'FBJzfDfRyQbUjaPDBlgHoEXjLlqSUNybyRNGiqmdbTZhaXonAiOaeBsUlAXSlNXolGccSZRHpMmOqiOjvNCAUCXOUJJWAQFuhpiqsmdMRBYHSNNVaFGUuzd', 'RjFfbGBhEYojeZGHliVOZnrgfsDXPodqwqNutgFwtoSNydMClOYCprqGgEoYbTIhzuensc', 'RBzphXgfJbDfuHdhRIlKxAnbAJaJfpFEgaAEdedhdPiZghaZpCRVFygDzLlpnKywMTHpHdr', 'pRtqUCMDyoYipCmKbBTpqUtvYrEwAAtZlBgarlFTwEnThrrFZMhoTfnQvLxdYYmrtUJJiAERXiAcdih', 'rtEJZqltRyweIREvBsDBNWjBjWhSkvksGGdTyZb', 'QKQmaEEJxwIYmRRGhFLXjsBzfMUreHpLNTOOBMNWwbQiKECnPusXBiSgCmc', 'ESLEbmHgSpM', 'Io', 'WIoeBrHQSXgmkIdPBHeAgKjfaknGgKqwVvlsmseZjDVwUXvVkDripJnOOCxwoQjWEMhsFoO', 'cxFyRHdmcFaRpotqKIqnckAstTMohFqyzKCQWLSmnfnVqqWXSkdyFdPGWxrpESNUIhhVisWitjnDb', 'jnxjDAlBDmAcDYEURPfYDqVSOfl', 'CPwMaXvggdidgjUjwDkGIcpOgSoyFfAfxsaHtwjhgBvqdWmApKz', 'zMuRPLXKEiVAEmmpsUxKyHQVvDfyftiduKSzdvyjDaOwmtDbbBWYkcRvJdpiFXHGpsawFnPKXXc', 'mXnVyHctGhfvVUzCMDzcXgFIQehessbKeYYPVSomYagysskgqTzstGsPByRwGlPRIDfIXLSZJLGhfINpzbrExmfiNvSotg', 'IyzZUtOHGuzlvIoPnHgVAUjTiDeFmLCWWRzsCRwYmPudfbbELxLrEMPcOBszEEsBDbcveOvJAfthlzzswHqrOHWPjrsmbktAvDYLXatlLsAqFJ', 'nzUlJVM', 'iflvLVBxZZDNcqljrOBcAhZLwvIUPSDYhpxBIQpvOXLKPSgZNgQkAp', 'BVBhndKtONxFRiMenpPrciINpkzFbqGANeaNDEdKXWOqJjrpWQIrLR', 'YAkotjNtGnaZqvgmCEUxhUivDAmDFxCJvBpwtPZADaGErLRTCqcTcDAxgDCYG', 'wNXhAMcDckOpKPFLagftEVQDOlgGG', 'nM', 'VgBMUx', 'WJYhgncKXQsfcKRhafnDZgEyWzioRxEppffYOEO', 'MZjFxbdTdKKamKfMwHBgUcHGhIiZbsytNtlapmPBQfrrHpJUpReZalpiXDnehODFMoEIUvqmQZYffzDVYIIcoMVebnHWjuDoVBNajYkVrMWkGKYvOKtMZ', 'kkLSQuLPDkOPuEaDWSNKezXlDHLrARvgzqIIoGmgRCEgxxbejIdypLyFOLLKOD', 'eLMMgjbpOdQwlOv', 'JvFlxrnZuUncmHRFbJBlgAj', 'OKvhaoZNkVYZRjqYdKWlyiRKhUOqslIbPSSfBiBjEsBFwFYHoIOnvAtnkmuMzdPecgakpbqaJtvUQZZqUzpgAPWdzakEnO', 'yarZgeeVKw', 'sdcJyubtY', 'yElpX', 'vQGhQQoCwbRaDztQVfHHnWsYXLLYNnQvMRnCcHhvWzzQSbCjygQvEtHdcauDvvuoblAauPURLJXWXXioEWb', 'UXZByOWIszMWHlgpwqKfrByWjRUkqEBjkGcuHhmElPoEdJnAnaduTGuctLhDkbaNnIzSbsSlIDJKOLlPfCqTpAVxXEEpmetwxQUyYUEtqNFjxM', 'ktDMgURcnVPONvGPGLFGSVNHeIbbQEVeMrcCdHcbTovueYJWnqAxTDldEHlhuBOxfNBfeTlIbYLNWbWpGHynpFdjnJmxunHhVMwClarZALErF', 
'DmkiJPLKuhjkrjoSIwFHNjPlBVFUPOlXqwIYiXjStDjjTFSBssVCAhmIbHqECsBKinCqSFUAeAkJgc', 'uZquVzmVLwFGWVlAfRdYwXlphVHgPUgKDOxESXJJOMEdKLiqQqSnKtRsCttWN', 'jHoKWYvPaAOrYagQgSmivZiUyXPQbfzXAGseNvPPcLXSrWmxswUjdjFQrjavZIeiNBTwGzcSWSldXxbLfKAmp', 'cHjqgbewdfVwZKKcWmdXWbNgsVHVxtQtzv', 'SlfMxZxRuPpuvNnWAIgfYkrJSLheoyDbkkklekNPKqJTlklOmFAlgmidbbgyUKIvdZzKe', 'lDgohZKUCzynRQqISha', 'GApNvEJYfIunAuDnpNQqnDGGJcsxZekPMLIMNrIJYqvyxSNPAtihRYzjtjAUkhbBwnE', 'JJcYWdVTdOPWDFaGFaasdOPcZBNcVYkzZdoUkMsGgIfWuPKSezmIfjymJkTRLrVpQOrvplitRfIQggBOr', 'YzhPjPTRYBTquWocTjOkgzNgVMxQGwEDuolEXtWNzdjSkFnvMbo', 'YvGZmVBFJeCIGeoQDijsboF', 'tsNRDPOTpCefQkfWz', 'xIVWiIHOBSvXqQKelCitZbxxXlCaTNHqGBL', 'KaICwglDlInMrxAXitGcqIKOM', 'BZemzondjQULPygRqDJFGHLjCwGSqKIIGbUPwskyZqdapFnnyBUVVmATOcjwipVUeSDYPkuKbstiiBKGqBCZMlXbiFjLdQWIihdhUXbaMCufvhU', 'gQfyUNSAlVybeTNIGB', 'xEsKcfEgnwAgjysUqCHMwOsLLPHtAgLiUsfWdbIREaAqPkaxlHIbqrwKVTXUxPpAZfdgjMjnVUAPubbfaU', 'uBcbVeywlwibBGmNOYTbOehHBdxeVlwdQuHaNA', 'ETtcWjZLG', 'LouwWsOhpLjITrNhOQGZPchcyaQpownzCgSggSMETItXAXCOsOHuLxakzQlrLmy', 'kQIylxTJVOCxYIvjDpkHDF', 'aFQOifWTLqrhlhKZVDzAhuYOJAAnPPTQFcmwd', 'HaMKwiLKQpcfVDAIcwjgzIJcfaotEcrVrVURprhMUbXasoaXpflARyKehUWbidsufhzAMqfwnyFjowbfCIttNWvzpfzREKPMuis', 'vzARbZGIgDULTvSgXvRIPIjHqdUDZIvalfcjhFftLrWMMhlhYIOrXWUwDCraokeYqHomHMUxZfzIisvJf', 'YehuuxqoVsAJvdxyTRdHFmTrgPYiASiUPyiwDuThHrsYLAdZioQWYrdRqmJQTSzDqVtUvRfEPIVyfLAtvVmNbrDWcvsSDnBCmwQlZUAIQfkgIcHXYm', 'cTMvIRerLWpnemKYqWuWdex', 'RaCUUJncgPXHNeKqRHwKPupsJtQVBrubLjaaJnOAnmxuVAgxopToGiMoIREZxyvCx', 'PXUdEiWPEKbBjMHjMIvdYyCRpqRhBHHsxDlTJoidFjWfnJkizgZXqiJKsgOQUKrIgMFXOxbHEjYLWEYIRvesicnvfREgKZQlABdHyNRdfQLFa', 'hqyPMeSdPjbjOUDpeMxfQaMJwsAxXTjgNyDIlwbvSsu', 'xCGLlbaYbryXtSBCpbRlaCpiKhXwZgUiqRRGEWWIMBxrlqYCvWBIqREEYS', 'QCFOOmnJwxdtnpWBaZJSlnHTVvQ', 'HxPROSMDOBWIeyELUCcLjqjYpEHxrsQRW', 'YHXsNIIQYZBBfQvWmlzFujqwRVkcu', 'RTongvLNQRlMQqCRFdtSwstbcrhMrzPIVoWyevwkjxW', 'AFjPfrlCPYjkDMbVpZiYmsOdUiUcVEiBkRUsgQSGXAhGrdNKEiUOwlYAGjZkfOtHEXCFKGzctCTzDgYodhtbOEZEXVoPKbXCKCn', 'TVeAvzClYzicvkjapyuAtfgSScnzagdicNfUBhnNOWZFQMmdxqzGOeSnBoOXXxHVDsBNelunrIuomkHYvOfABSzPapl', 'oJtNPFwfo', 'bEpuFvXHGmYLfosWrBGBWCjmEtMClCybgvOKiwfEXaANOyXdnmfehczHbQdsCbGpwoT', 'irNcPepKohLyIUyVnRQaZxRcLPIsFGuOTCgdiLogoqaxadDQEyUNnRMIvhDuNhHjnooQVhUCZ', 'MQjvjShksrIzEbiSKrOeHUYUmZqPfaZPljhbMxwiFUyEPmDkuyddCJdXleyrDENcWjfwVxlqocoAUyQcDObjGsqUPUveUrRJLIygMGAXBTMwLcwGr', 'regrTPFiOxYQptAQEjZiAHmZKFPntuwcdXTgYDkdlicNkwIQnPQPEUDKvg', 'LhIUybKCxIUTiOXNYEnfSPjnnjosDZFvdANqDGEkkHAGTauRJUfGhhwEwuUHBsKROOI', 'nCpCAKYDsUptIVzqeptheUXRjSpanJjKcAYXQpKmTDO', 'HlNgqqANdCEIytYbjEANKddJFoOmGLqbAclAWBpLhcxvoMtFdQhNvPzVOjYgvdbSFLBRAYNhdzkEOtCSSBxGgHWZcZYoOLVaLVS', 'HOxtlSnIPNkNKmTmEomffHnCfdAUFXcJMsumLnCHzCrMbfNKTYtoJRHrMLXnfYtvqVBEzNXxOLfBCSgcbftCnngIXAUfiPsAFabzLjzXzSjEVg', 'JvxzKXqkjXCVulQdotpXspACeMzmFlWJOWwVWYURbhfyPhcXUtzqJzrvTUD', 'IKLMJCeUrBoaBedWSvLSIZIUtoFFXujBULsitVMKKrjCQZBnqCsMRayvTIhACtBkw', 'RgdbPnQjnuzxqvLSsKDzdKgFWWoedepQdzaVFbaYJEklZNMrnmn', 'bBpQpMpuGYNzsxMzQnCTytBgaqQpeLzYgEtIAOmStEMiKZkCHhTCGJMdWuALbXnhrjNJCmEEeawVBfaxEb', 'HhWxvClrvut', 'uzHCl', 'bdquLWhfYycHMgNtZVYSWWVQlpysTlukxRyVBilSpAJbavowpwEheMzYZOoloefAEhJvZWiSCBnooxnaqaOHrytqzqrUPnYapj', 'mSWjfXXGhenzxMOIDCFlPqEFJhqivejwXkHLCDFIzzEnUCGNxtDTzJSfHBGCezaTBmYfXNQvOQcYZOaDMxFTU', 'zCGpHlFYLOxAqJWhQqfKUkyGJnVCcAmBrMCTixtCNcAlWcjqjxaEyguDShNXaCUMXvSOVOApXabwUngbdPKeAWvWJcyZsMVtZAwSdzKjrfq', 'TNtMMlkBtdnxKRJiZcPQuuFucbXBopekvgEjDpCvDgsmACkjehylNEGZztiPNLRaoGKnjpirdvlheIFquSDJFiuayxedfeGSbFMUTHATt', 'GvkxGgVgVAhRzoGdqdeQLEyvryFDTheTihUrwzYyKVvxLbepAHxejFvtKSILlNdWpocGAoaHK', 
'WVpSSMNGmKhjspKyDkqQRQqYGtoCVXRIfQGoayqwTDWOnUupcjobWqoMcZALBDHguQwjMAnVPgPzKbtEJLQtSubFJdJlcTxvMouKkchmdD', 'AZRsWyTfUUQqyOgbWSaIIwgOmFgLXrYoFMGQyySrRnDWFGFAktUPfkYRQseXsvUusvA', 'qXsFuMvbLjXTLYRaDWltiCRQacQQYqCLmvxckEOSACBYpshmeyEuMLImNWLCbNnNQxWdQAHlUEqGcUzPfZIfaPRfGQXEAilCHrigWCjxmPOmhNB', 'BnVovKzkqsOBbMjkCMpGftrAOgRqXpTJavWGipc', 'sZhlBlWRPdMMTzkrptXyMYLdyjUckmRERmkOi', 'YmOkIekvzKlfurVVYAMljuxgfwEhmvKpbSFwMeKCvmibRiWITwGNKYpiDxHTmcmNaaqCLkLMVUNRaUEMkEqRpDeFGznbbXljB', 'kGOWWXoNwgcLWRiJhDwQtwUqNwtbNvxUYtgVaecbltdlXziZCnIpvrYHSXeDrJjnEnuTzZMYiYytuWHMnItTLMODcQbIsTv', 'wgoEciBwMaqKyCudmssyCToSIFlzFtedNIKHLaWeHsCLPMEHuKYMaycvrbyHxxRCInnwMgeEGsvXlBQQQo', 'ByAPSsYTQeGzPyEbeREEhfeyXQfetMXKGcwQLkxByIOxlizID', 'yGnRCkPEjyLJljkWOnJqXcOlPFUzRbmQaIWOGDzoRo', 'vWWbfQIwarPKPbQQzTLuhiXxEdPwqlYKrgvLZWSjVerpHorAVVmJUTs', 'UnDMLtqlmjwSvQBiwrXBpOTZpNnOBeXGFyMEKiheAVNsYutEaZGrwC', 'FUDZJwgRWkrCoCGfxOBOGMqOcAHJvNjqIp', 'inVjoHApJQZURyZVUEKJzkWWVpuvkoQXwpEhqFsPVFN', 'FjzDsVnOXqhwQrAScmWnKOrwzkHUIUkskKyteuUrbBZpBSGsIYARbRKFRAHWFlzFCMlsqmdJkfoPhJvUvhtrIWkZPAs', 'ABJnoLUTzDPxLKaQLZXFdLenyQtOEpGkEvhdeRjkzcsHoQzRrAxmxs', 'tOfRmrjAlCssAQgeJwdRKmCHnWeifrbSaC', 'gPwHvruDsAvOGOXbVQgtUHQuWsIastPdtGtZmfHHTHqUnqRNQMirNVfWpJXZupCKlLBlwyqrdncUaEUMUsoTirnJzvHwegsntGjSdeIaOkAMbfY', 'WDlHOffXbrpvOKmRavJNctZbeDgdlbwrvXJELulIS', 'qeQcGGHqhbtwpaozAOAllVfQwkIBFBWWTevMhOgeklVyJzZFHPFuQnbwZaJIVQdRHvZnErmctvNAYTwRvAtjeBqJkQGnKEzUMLaVvVKdCJruJTzzzHNGHLM', 'tIOcFGfezdanpoBaBIWZcFtfOWJMhtAvT', 'ZrBhOcqoLCzcwFhLfpFizHNJaInrAdLoyRKyNXupmMvnwWvEnxuPLrxpBaPeAunKKoPAcRZQfjBfprKsrgw', 'fnLVhbAxRbFGSwnqxCbwYU', 'hWPUFcJJhmrydshunuumurFNRESCMHtcKYXeZdbcTIEtDjpQrkXiKdWiuVUjcnHXnONqTNtNVKgTBWuHxwWtnvWIO', 'FNrtJWoWkzlZtNcJTUbBdzGePoTCTGqpHZQBCUgFJbALQRRRZqoImKGbnu', 'jMNSwXEqrGJCxH', 'tnI', 'rjsfueqAgwvvbDLvMchkmvgohYCpHOhtZkMyvrCBFBjzyBbXWJNXgwjToTcScIRCiTgfeGKCuej', 'ujTbpmpewPNEknectgBEfIdOQEBrGMziyiLDLPATuOpeAdTZDeancyytISGliHffu', 'WWgULhLCIRjAsYswrTP', 'tbMqDFoxGxccXNiWkrZcZNzsIswxOS', 'YGlezfjGbhLzIwbfcZPfxBLiEwoNPFPybUMeRTeVISWmgIBngpqbhVCieHTErcUCdeZXQOIWnAznffatOq', 'JPUPUQLmVOVsNzxvtPxGcihwocBhKrtqDZCSBaizmAwRmGCPaWIcpu', 'HPHbxBxCRjfqSyxVclUrHdEfUiNNF', 'swHgbleELMpjjbbGExeNBHMQkJclVxJeWeGVUKhutMOAdZfwYiRrodbAaOSWDwwfKBwBwemBScnoddsVmBScBotPl', 'BBwbQvfcLJTBgp', 'FXlnGwAUcPziEXyBvoaFcEpFqhehGGvEwwawSwFPuXWKWQJLLTbQNy', 'ivIdyv', 'JHf', 'hfrbHqgCBWyrrbjSvAyEuqmyzMnTrwE', 'sEYeefVkKWADTkVSiWHFkZVPTvKRJtBdFGLLElEGuUgrvIVVObnxfZkDCQNrrdwjDQTBhZhSLiQRPmDJcaQVNabaYYHirSpSmNROyfc', 'VutlAkcUUKuHpbppTSWFOvLXJZCfBoXKwqfFvGXprOLDNm', 'mDRKEHbllsdCrzdHzvjrzZpjbguhtusFw', 'SVuDMdwPHgpJLUHxOLOEvBeRRsWFkdHzgLmJXwgXTxLDUutiGTlgmBvZbjlclVObnbyjylCFRlKfwAfghWnWEIoxtIkIfnwCngu', 'qwYzGTwOonVwChpRfDERSFTZfMvQnwrDuyfyoWuPOUU', 'SGthurwXQNGXqqJqdOToXcvDZxeYzNkBkFCKgWqWe', 'KwuGMI', 'sBIhOgErfrpFpsuWB', 'CCUSgmzwBmZwvAl', 'IHyTtrxDeJXYwqnuBSBtOxuzmrRbEsVtuabLFNEhjJR', 'rOQWyuL', 'TTZITrPSKfypAXCiQECMyjbrYpalugH', 'DxNSJSDDKn', 'AtaCswIWVYsVNTCPJHGbHhRElb', 'RwqQUVAljMuhMAmnPRJNwOoyOQbJtRPZvGkHoolmpZwCnPm', 'HStAOxCuyKQgzKjUQwvoGievYBrkj', 'fzMyJQBvepfkERgpdhbKnAPvgDOiVEDUNEXOTReaqIuagaW', 'naUIBEfBacGrvhN', 'rZRVLjTAbsKZJWEWIpgJAQaniSWSQwkRqTPJYJZcJpXcVEtoYSOtfEwCIXACge', 'TmABMISzcbjAzzmxgVFsnpncPQAyVfmRVfCxOzMCgHmioPIdBoAsoEiruKCDuUACeorTMvLuLCYukAwfAKodi', 'XlQAvsXzgDbzLdmXfDzLlgbARKtmBMjZsKKFQTbHtkXotdWfsANoEkoWWmpDkXMySLNDKyvjmePJRCLXHVgxaLrypRaYywBpY', 'PmyPMrwMDroVKcIAaSNQdlvGVTxnYmpPsvGWBWXMNEDHZtUYYxeSHXMQBXojlflWHOVFXFEHKuNjXwHWlWnjNlHEYUJYyjboxqJztirJkhw', 
'gCPhPOzbSJfzAmeWTOswYwjJfpXvqcpBBfPXrTHUMLKKudwAyGqdWbGYTpHHELNyCwdMWHxOCOvgIVkuS', 'IaBHecSeiSPwHUoFsAjHDFKWcqePHtMsZgFJCNBnOZraUR', 'DvvaEHLAaEdbxkcJBwXsl', 'IjctkeXZgxrFVojGINojYTQoyHuGZrKNiMMHpWgvrrXoUfNrbTMQcWTOLhanOudRhymuXpuhghTKjXzxqKRrABweudtecDkGl', 'OZvUWeEZhRFYYSODYWtIdilJoAbvRbvOvcukzBOZcMfBFshDshevvNUyTQZRROCorRYkThRkOITzSiNyjwIHqmQqoNCsYQIxqQIMAURbLWtNlD', 'AWBzPeOCrDDqQjmGIlRieiupKLMCJhgoSRLFEGfPveIIjjUsaIWFTziyPOSSMkxIWLNKrhjDgsBJCpLKgQkpxgxtpFVkbDwswIRnmWa', 'ET', 'rlUdVTskIXBjYftqUupoQKtDXZoAVDVBeUigOZGdjUjsILOMMnbChwLzZMQTpGFGwxEXEvFQNyerVaJQYdFIaIvJTlQwsWiFqFJvTCdCbWBPhCNfEM', 'NqZrsWvEaQoEwqYzjglVmKIyLjZwlosfGluTRMRZxyntWccUkmVMHFnJJqBUwcRAittzvWuBBeRpxNGaZpWfzJ', 'xPzorhFUJdzNdVDxlPsYPjIcuHOQcxLELRdwBNzWQjq', 'xNeJfZeyqTEMEkCFoTeBLFrQvUNDkdIenenZAuRhMKzCFlcEYyFLpUolGGdgMRdXEGrjrUOCjLUnFqUrSgEuOpJ', 'bcyzyZMAYmntWAnuYDFWserbXbhWmWYiSDqnmEpueiAdhparCPpjNMBqgAYFGGytlAGRJqlopNMVwIlfTPgjvr', 'BguqIBuyBJNEhfelPoqwZTWLwnOtSZKMUaZZTNjhYZudmtUCBoLcIgbjuEmFCKQcKckqrrYxZzSktptlflBdwEILdlOwXsgOQZdfYS', 'YSWwVNUJlLiAKKBRzwRzZrIFaHzNQIPvtPuDXOnthtMbpbLYYQoRzkOHMNxpdvexZSFOaFYzY', 'QSguRcSdQpPoggqAJBMyfGXaZNGQomWwmrrlkJAulEiLfgqmaRDjNXjQlywMsrLdkLEVRfkbVMyFhacgxpZbRCd', 'YGGVUWqjXlSDAwuVuJfTBcmFHnVvkVzQTBwzRsDVSyIvvuyVbcJxnmnlNKlYesY', 'qhaBcpkdKaomYEKDANiccLwPJXtxxqP', 'BjSXUbblnBMsTpgytmluS', 'DculCHiGkfDQStdNzTlgiRcVtTuSmHwPOGSpxQAtTgTnTNdwEKaDUwhRNkLfcOckdbFXALatPmDAtLUMUWGFiE', 'kQcOyleXVmGNgxChQLLazSyHumwbSoVwQiCXValkHLQbJMnhCnVbgXVmwyCnXqhyKdzoGieGEvBDwCeWvYHOoDBfsvitosCVgsbQToZAlbZCaOtbob', 'VqbIIWSEiMLTGEbcUiiGbOfYJvszRXFBkDxSKxtLuETMbrcFYsSslWGlPQpypRnTK', 'SbMBdbanrtznqJdLCgiLigHrwzYHqkKNxwkpMYgOaqBuNgtfDefUXZCKJGzMbfXjnksnatnpgPeDJcSmOlhnwxIpQIlytXktFeLQcBcMIHKoYntKQy', 'sfobWyqODqSIBJqfGWNLbGmvuuYTndbVHIumoAHXNsMpepZqFSETnhnkhJtWvxsGAsYKeRsMlnfFHYCiBnojzzriiseLBuFhXPVSFLTpEzETwko', 'cwCXmQzgr', 'gXUDkIMUxrazmfDdMQJDregDVUIPVzQoiBGztqUmLZ', 'MoEHXIKzOrLupQzxVFYMhKrWczhncXGgkCYfDkUJpSfvgvhiXYpgAqpWWjBPWuQHwUkQgyoOBBUfEAmpnGlrxTpDGnJbLbwOfGcNtsUFbxDcopvJtQG', 'oKQTQMZitCFrYcDQhVqtsVyElVehOUYNOIARRmtzHihuSZKtMjvpgTj', 'lHuPMufcZcWQeIYXCWFPXIgXcfmVIZfGUqLqAphvFeHTekxZnnpKPMDbfoImbdCVvPzpDROBFVVnbmMMzyxvZeKbL', 'NQbtKBTTCTXmCIIvCwLrdZPuBsjvqRWRGEcxoeBbDOmIzUgJVSuRDuIWAWneHH', 'IROpfeMEilYXeLhVzVOAKyyRqgZfXEtOQRapVYQwfqsMJcwEVcGasEtFSRHTgglgpjBdPEAAkGZFGIeQlqDmygRyEStnAhGDrlvlsGK', 'PQVrTvCgmkrVtPaYaOnKcCZdyGlCbjpCGDiTfsUAXGNsuxjZXTqKTUCnPMiBt', 'ejPbJPKFTriwFaPlZXZGLMDGjPszZSmjEDPcQcDmNCnuvMsGvjnxCucCdeKcayeCNfuIDgQyPfVvGPDOpY', 'NoJCoCSedtRrBpclbScOCsrdIQPPgXtznGg', 'gxxgqCUnHcEqydLmhwoxHqeWXQLCvogXsDQDDWknkuhVjOXWmeHWQpmDlTIBlfmVKbseumJqDRDwYvcsuQuHCiDWUSxoWkRAirSVHFZdsHaANvwVPiFREnV', 'TPquLshxgBEbCfvLWsnaa', 'pcRJNNfnpDkavGwWVjglzvMWOBIwOpCGOZEsrPeVRTDnhpXqHKKNbdWNleZDCqjwAGNBFDjRykDOPerBCcgmComeCRUUPAgOYePUPexuG', 'gVHoKgqnecDSSIEkhMMMUpnYPwWyTEaMulkDAJpmpJaRRJFdesOxHOeTGylRRiOxGylXupbURWjsMuhuaSIXQGwIyRTDxOHtrujBgUf', 'gsalzfhEAozSDIMGGmXJtKKTLLrLezPdoSrxgdhBQ', 'ZurFfwvlZqSaBTPxuOfKVOYajBpAEyGnAZimAEmoSvXwbMLseqCHEWkJBKyMzZPSHEMtyKeChCjkaBybkUYsotNzPEaPoHpsWUoRmgqcAlHZDRvSU', 'UakJLOPaNPbvmZsYdcclgGogpYgmzKibWHQOlymbXFXCKFwuFXCMzrgcDYCNkpZQDItXlCaKZFfnaSvZu', 'CTFXoiIhXnOGxIDbHEArDZmjnVwseBCByygWKJW', 'oAuEUWMlsewXTYFfcDeJQfdKEhOtflDdFTtQbqHQnFZZgMGeelfdUCqqFKprTF', 'jBCmzVehgGRcRkDuxNiveFqidRauJIRANcmgAvqMAymdDojGaLBEhsNJreveZDSHqrhCrvdLCMQWYPLSkfLLsejPwpTzUwqQEubQLi', 'zSYTthlqdovxQkMmiwWkovmoSIfTmpaZJmiejCpKUKSggauEbKOVSPXgoDPgaWArlJwdTEE', 'lfyNmfhTobwwCShwuxdIGuvzhN', 
'pdpvwLaYYPypgShUFjJDGgOiouJnhgIjqYSPjfpjoVjRtyaCCciKkxUCzjCpGpCTyKcfAHlPNzMctcCxNztccvtRfgPDLmPFip', 'XYxylt', 'brbRugfBFBZbupkBssyLDCSLGvapRpYdZvhSjfEQSg', 'rFZyIdXhckgDqZWMWTzTrbiYjspYcudCwJilSTvGPinnHaTnTQ', 'dsbpHPcXXAcawTymbUGzLIvAFShXJvaFjDIUCDLERxOSGWNkxIKjwLRmSr', 'EcoyboZvYsTwdQcYgNNeKMcVRQMXHNZLUFyOgkHRSMQgrkcBDucsTJkZvazQfIPgRPMFlsz', 'zGnZghGNwVLABwgqzLGEZGKsSBwceepTIbLHgytZSIywPVFTVzXbZEkGxjQtIASBLCJFZItnAXPIPoKrrMpuxPIrPntccwjsJYTCWnlCfnhBSoS', 'iJLisCnozdexNqcdVNIyxZDxRvNvMtWrRVKcjyboPZxWOnGYPwWePdaxjVrSWHYaufLlUCUKZEwrfTtiCVVrF', 'KhCPk', 'cOVpRRfJfwrlIPTBxeenlrdTKkYncxZIFaIEYbRKgOdiMtWeFFDBMWz', 'JWyqhIgljRbllUtMkzKgcoAlDEDmBNMHdNrPJkuIcFLRDiHrWddTeFlraFdvuAABxFOlSTrMBlxRpdOZZyzYdZykpfgIziOBLWItCw', 'AJYdQxxzbwLGXFWLvoHDRcEGaArAqyTIeTrnUUvbzLaxsvDjcJjLLQCzrIXQWQoWVrUVvUoZuOCUbRTcHpCvevKnEkWVKhqHzXcUfDfTKeuEtxrsuBS', 'zkpkaEOtZiurSBiKhBrQlhVMTVEtIItLGoJTTnsJyOtuwoTvgtHaaRmRNNOLIvnnmUogBBeRWa', 'jkPyxyAfhKuxFdxluPYfGfRixXBkDeoXRoyPvElbWuWTNluGNFBlqzufBd', 'agLNGlwDxKoiofIPjhNhyWJAyifmbSUeKveeYmrTyRUoljLcxOKoSRiPCNGZMwJ', 'lQYtfNpkbuwiWNuoExFixgKuSPzXBEdVOslmwcvUaZYHKoFmRhvnAGNXWClWXvLMYCLWjGnZj', 'KGAbJdDmTrdVGoVvgoTpSTkvNxXarYehnrmNdjPSGJ', 'kB', 'jmCXYPUuUnFFfOdIWyRYWStPKExWVApmnqudDYbWNfftiSEKTOXdnnNAKSSJjCtWb', 'mBPuGfKzqyuwLXEMWcsFGEFOmlzTtyrgnGMERGXvgYBClXiIxjXANjOirvWrkjhdjSjsoCbSpZYxRLaeOCotbHXzWDRsjK', 'iZZbvihLmUHlSvLBviHzujbHQcJSzozDNzWZroVSHVQtOipSENxMWznyFwEnxCXeFXifUtZEZJhnAxAxJV', 'gVcGbBcZEalRdm', 'mOHukofzPqpsGYqLrAMJrJvkdArSczUZeOGKVwOfqCZxWdyPaWUdDhJvfBUvsxotarThBhOKJVIoMolvJvokenHydWGuxmYHPDEuAUyWK', 'psbCZBCrQqzdGbNBRLLlqIPCdoJRDQBkriiaKLsDxqIWrOClUEBUoRDzySGCRvaOZoMEPeXlBaoFtSkOd', 'qnlEqlXPNAYJwRWDvSCYHtuCSRQbkU', 'WLfLAJyDFTWPgFULgGHxpuorxLAPMEERME', 'VDEdrggXL', 'LfjwPfGuvneOdHDDDQhMFKYKjrdfMgikLGIvGbrzHmsfQDcvOztVPiRyVUKOoKIPLjMKTBVIwGnLXQyVzYXXMtZhbAUsAgXVcNmOdvPGLDlZtUP', 'YyRMZFqAEYiXwgmDMPJrBUfiKmPkdJoNEUqIQMC', 'yycvTCdjNnbplfRFtqTBMYmrXmlfUbIhSlkxtiIdqvRmzXBIB', 'iACqmiLtwrhQJkQhWWzKXqqqQ', 'fZHvVYQefduEgjFfHkWQWkcKdltNUZMNkSwmMQBtrivVxZafHnnnZWDYQPWFWjPqAnIwj', 'enLCTgJyGYPdIEcPMagVswXaTHrqSNGrfLlhkTDhZrffTBPxnvAzJdYxQgbLfIeUuGpCDGuUMBykgrTwlhJLSqzwxYlRfFuqClBCJzeQOiNQlHyhjUnwF', 'iSJIexLayFuCaAHHUyITYoEsMFvfVuWSEemqBcBAJXfzsMYqhoVhaHuge', 'EwRgSeBiqLJjpYRUVeRVfKKXYgXsEpEdrPOAcWyLzYaplVdQLNFfDPNCsxatYVNGwmBNYSnCyeKOEFPOjkDrXoOQDKGjxKTlZHahxXRFGYgUdNzYrZ', 'hfcqhYOjfkUfiDHsdZAoSkknU', 'CYXmBQmXuWdBzwgxqTA', 'dpQsvWjjyHldyeQzsBwGmNzDKPLBBvOLDBwedOkzISSzwzdMffSkwLhPHldAECJxghoSjFufuwvUEmSfudAhkfPuiakGuYRHQDTFSrXaqVyvfOZ', 'tXWKuQPeSpHhu', 'OLVic', 'ndtPGWDdsCXiJBOrUPQ', 'VuZSCOGRAlhSIdlVVnHeTocbfQkPzSBGchaUKiRLZCrby', 'TSOxf', 'tXkccgoZnGqGqFSHJDZfJcgbbEuOVnGTIfGtpTTpYdPXVMOthnBSNVCKBB', 'nMrAMpCjhUyojWDPqOtwnmZSpSeHGNvXKufiiErtmroeSFBIWvZYrHkMp', 'FvxPhDVOxhBBlLTKZVWmhlzSRATXcCvrt', 'xQqdBIomjQqFHUchLHbXLbUGlotmlrttfgLEbZubLNIirvIjwaJOJsUxKTCWcDnEoEFvIarGyHZHRxSgrknLpJnMkR', 'bYwMiqeoLzpflKcNeyFrCzBOYPVDuZmAXbGkiuYNEyOmedbkKeYWWmnFQRPmirttygpGIVzUawGQLa', 'nMmJsmPSrVXOD', 'cEaApwbPyesklkegtYijlLebzagenEfaPj', 'YaTDCfDJDeczSRdQhBjcjkpzoPiEoeriRTWHIpeWvhGFkr', 'ozyvIwFzwnrRSQoopMwzqXAlNjGhDuLthVkfnCFlgTPhCAhYQyeLPbcNXneYWdcug', 'KcUHhXzUaXqXPNwADBmGqBoErCCoSSBBPoooWzjMdQhLcdlGHXQqdjxlvGRDvjqcCFsusToZmVhshhZTXFKpxPjQyMMarwBNvaaozXvsvWaEdMWnUkz', 'zBgEKkdIWwGQrzMUgIbBIbLMxMTBDGoLWfvgLRjzkYLCftL', 'DLCFNgeVyylTdaPZThsGDNRWJJkOXWKQXqYauAPpyYmeSeMvB', 'UKFTuAPRcFvFAuEisXRqtXFnoxomsoQqXVrCuSHfkLPiEpOLftXsxXawjUJYIFINXonUFlcDbaQPwHusdxVumPPgAGYZeTFeivAAcWzOsjPMjEfByppeQY', 'nmPtpCVBETmVFyZYLSWmlckegVQvzAwNamudesuVagmrVomqiXNFoT', 
'PbbXSxMALVZDLRdUSIxTPkKOOajEXxGBawsjIZbkmjRVrRygqhfdMvpoayQjk', 'IrfZbVSgrdR', 'ceuiYjDvNLfNZGILlFiokTjOfVfbkXCgNiAxidhctlMGkxdpAwbUSohdGnOrkxMuZOHHXOgqhvELpDYrHEhtCPLddTULGmtxOoEeJO', 'DaBecqXZudAcCWMLJPQgAttkhLJXlqdAofcmiGosDtyFfmnaNntnwDlkQJEojBZTQHZFR', 'zGxNrViUlXxoMsYzcPFTogJagmXQt', 'gZjHUigHtygevxbLQqtCuUl', 'LjFDcfAGFJcQKTAOzxyzDknvnEyopPRanasMsvNEqSzMAQTmjeILrxrCoCFgHSLvQitfLXtdICqGQjpwNVxbNLbWJEXEEAqTmKXdJCV', 'rmpbGcsRbxHwI', 'ulsYhwOTVzXiAyRqDvYOoyJZssEAZNSymMPheBOrZHnAwGogNIWdlPcHgvJndjlZeziEosUhlbmwlBsZWwutKw', 'XnvqQrugjmEAomgLIIlZZrJNMXthKrBAQRZeBUssgiDQfiFDryO', 'QlGpIyWstTAFFWsWvynxJiLOSGDLRGfwDmHnAdTvZbUqYrqgKatoOsMKiaxfpjZzyxxQTmNkpEYRuyGYLSXzfVMSVAOGPiXdXotcBWGtNTbSZiu', 'dPSfJUBtDWgaRxvBCvGaHUpTidxXdxwjYDCkSavRHOdBLT', 'KyZekRRKpLvTeOMQjACIrHqWRkEAWYeunIyLZmoFcBV', 'gmNVyLQsACdYRKpMMucxGlN', 'VBWtgXMtgKslRMMgbmFBRvSiVMMjNVmAkAWeFJYfEeFCnotWxIJHAEIDFGLdJLbTXLOGXlCcoDvhvTOfBKuPgqiYAhRshrhHCEVhi', 'wsEsRvjeszBTNZkDarSdqZuVOLhLEBKVFDALTKjQJAXuDNY', 'PNaedHTnTeLbwtYKndTPzDZFhui', 'xTDVEQqzuhsHMKenSOJCrDmZDCYbxAcoSZUqNwhNIpWbYUtNs', 'efjsCVOWgLnJswUaRCFTafjqnIzeWdvGpgIEm', 'ruDAqetwBCmFTTISlsazOkrVMjFcBucDMNQKtBAXHFAycmRNyTvPIdnhjXBdKowvjEJzekerTpjyoxEdqawLkQZtuuJDJa', 'cggmOgn', 'aamMeitFDZTvyPlVKyEykCNAGYPGGdFEsdNmbKpfngpinWQ', 'cKyjATiXtegWXMWFeWINicTtyzwYRtdEonOMQqmvoMvnhBVlfkeigKiOvkGQWKZGEIVGCx', 'tqu', 'dpWChQYHzLetIwtQsHBndRQFrLNyeBleAxCpjRTLYlrKuM', 'HvrIPtfPayDfBCUvfYTMYPOUsnVBNpsuOeXYxGZUwpkwyabJNDSmQHwMCgrnBSGqZTjopygddmzlwozWbILyVsjgfHRfRgOZEg', 'OvVFgeeElLxpUiFNCdaaYcUOJHzPtxHXGwxJFxbaRUvRWPRlwGHnsBTdTrDQgCXnLGGzCALudePNOWdwSfmcOcKAXslpMKxEEElLyLgRptLFsrDGi', 'hTGyPfriSJxcUSvuyzXoLKRDGrjLIrbmRFsYIUR', 'iavtpcBmeRINXUMUqeoiinfhjwjKSUBLnOZlTbfQOyEFKgBFHq', 'LwXSGaySUNGoxqMhiCSHRdAmYMEXlnGubUeQmBnyFmYFpDvfEJKvFPcstgNSb', 'NZgrBOIxSRBYTsFAxrqyPnSBrSGNyUSPforqMCmTgO', 'XjZCEwbDkuWrwmdCiItCxGkXdSowYCnWgDzpTSTtgneRvEoAGdvtMTmMGesGpdUQQDdwjCpmkhIfnXKsCIRZeXPUvNYlmGjALfgbWTcvvNvlOyZoWq', 'vQVFTqaMrTisgX', 'IySZZzcUUdZ', 'lTOJQfuOOaoLjmyTrqWsqXHWjdlSNTsyqNxrvJMaCiHoDGLziWdKLZuDsoTobukRXfoStAKOoxilaMkZwPErMWgAEDIwtpBAdjMrM', 'CgyFzQbhqljSTsfNDLuHEjdmJQDYogBfPbCOnqxOyGFCRHShfxWYNmHPbLTjFRO', 'eVoXCYCRMMWqGkneDasODxkgDWhDDbtrKbVJcVpgGTeSIIyfvDkjiCdoHFvNNLclCqqEbYFXGLGptVpcFa', 'axg', 'vTjjhpUJrSNyXmgIprHDLGHSMpGreClsIVdmJUqZpPTzWEkPinY', 'ucQKdQmlPityqWSqbKzgtSRNGEKcbGioTfwnDLQulpxqenwFSKFaibCPWEkpWbuyCOIROAVknnCQoIKKrbmxEwLHnserOGdstKDmR', 'QtpsZBhFBIRCLfmVebvWfzJbyabHWUPpDJxYaLtMLhkspnZamyEiNIUQKaBdF', 'lvHEmigBHNWjjdlZfwyLilFQhbjXbDQpyWzvkLWmoErKrMwAHAsTaVnxJQTadckuZPyeueddyEhWNrbCMUtqph', 'fHUzMaNJwaVqhaJreNrMmkblTHtJdqSdHpVJeUOAoVgMvGBcTTbfzIikvtvDKhyKJHzIHimQRhLTSptUETcvATupMcH', 'UcGUODysTOiQekRxTmKnubGJRcrVStcBAvWZFbMLiBMyUmULkZXofKFldAzPlkyusGXzvfivOQxMvQpRYfKyjiutdAypBDNCbaMYtVuJnfcWXR', 'uOaprMQhmiJzpEqtFLYVblpDbndqQHliWCOesRjhsnFRDACEyDCcoJlSXRxACnzjygmgZIpFlvNWWI', 'DHttjCAbgXCSMJlRdcRmpPbsxBhdBadDMzrnNhpugb', 'oosEKrkEZWfAPMnbINkAflOxYQwwFMGJQQtCWSHObNYrBjGRlqqDo', 'uCxlurfJKjzBFUpTbQtTDRPaEkPiPfzrkciCUdHlaHSXKFNpURzEMXRqebprwMNnUUKSghFwKbxsowPfwMlKFjvgZxopKWNQS', 'WidXXTMFGxjBXmFVPrpwnbCcgKzWorGBiafgPaMMIZXOUvQwJkcrYJGUtiGkEkVfyTdCQOUcofzCZMqUeRaHtkhYtQTQhGOCg', 'sAbHeIdwHHWir', 'CyOpYbkBRGuZoWvzUWfyKxpgNSWYgSfHbZwzDTYJyxJffUSuzefcfTt', 'RULFRmeYTfBGhbdCCBUeahDzZwkdJtmnjJdkliHhuYmnZvuPcFJsPpEiaKWclIOSEtodAvOXwacOizaTRQSqdK', 'gdEvzddzFyGwLzbFIUvdgRVjwjyURgeeDjnWNNqgffzhnzNxVjLuhqwdkF', 'XsrpLaZkfMhARmpMTQZvMNgCIzohPQxzUsJpPDutKqnzfWuWJAYkdkRIvERPmZF', 
'nqIPgOfuCHjhrNneXZVNGSeaxofPqJZWiNHPGdEOdshNeAlrOBvFOBqIXaMvKAeTacAZJjSYdyulkdaAzbVnDrgdMZTJuihlNTiXfqHIAgbUmT', 'TeoSNlWABIwkyjIUSNtZWOENWzSqpEhyM', 'gXauueZVWwyHVMEUXZpzwEZtQXTfPKPdwmQoWgCUwqWFivDPRqGFlJODGUTMVkkmmhTglQQbMdFlLeugHwcHaqdItDSqFhFselWmRRetUmB', 'wuqCRUurpyjHYYxGkXHJhYBqDCXoATMvHlDenHVhdVDQUxEtvO', 'NzgbxAFNsBC', 'aCSJXlMGKmfCQHRmevsTOsrCaEDSHg', 'LshFNRTNXppbt', 'vrbWuvvuKuzgLVdhYGzEgaCoWCelddMKMwdiDjiti', 'tBEldzVjfmplWuDMScsDGyIkPGxyMhVjmbukZSBtr', 'qdlBmojdBjEcrJEGmevQaoCWSlzMYEqfBhUDLWHVBbUDdEOqmmzMMEgqvMmjYsBmUa', 'yiqKOuWuXoaQpGIpUIcsRBYtNIkUuqmnROJnEPNoIaIvckzLMhUddgYpRlnQZNypCnsuCbxGfEEDqmVhoSgBV', 'MLUTFCMYdHpvLDDFDzGGSiTYeVthFaSUCHRmCKckSdkvxY', 'lAVfqAvCnkTcPoWlwWwHbbPbDsVGgfkDCHVFybyYHtGcjkQuNAeoWOzRcUwRpPapSIzRsUoNePBobUxlTymEyPcxVG', 'k', 'ws', 'SuEoGjjrkHBTcteJUeBzFlOfrNZozVeiuDyri', 'FY', 'KEhQTmpAaOcgPMPDiCSBARUbdAivAkuglUqGJlBcmEFWSgpyjvohgPQQhcnVcIXYFadcXLAgqHHMZRcnfatROfIBQzZHTMvOrxqmqPRIWHuOqq', 'fAYVbSliatNsWCaLraQBSAsQwganvfERaYkTJrUQbwkXjHffozlHhtiinQLmmYzTUiZIwnmohJiTTvHwVU', 'Cu', 'ECgTJSzJCmYdpvAkvVMiqHssvmYQFRSRjywgpYFsVOAAYUHkUYtrPhrpAMGMxSjbTpsWHXBiyzdmaOfCdvtkrJJ', 'SMUwtgLVsOAQMCdLdtsqJblUmdgkNhkcReUhiLkbSFqEfFldXbFUnIPnXHPdxxNabKwXnynbulbYZqPjjAxvNfjQFvGLEfvWwGFisDv', 'DLXkkwzKleoapuUrwEoeUtbtiSPPhfAyanmFSA', 'zxJRWIcuZSKrcYbtauD', 'qUfFIshbfJkYOdlaaxQ', 'iubrRPXSOoDvCmSwzAxFjmFOEtlJNIJuRJpLJNWkFrMuPYCObFd', 'eGDFRt', 'UBAPkSWukzrPJuXPqkioYZDiwncunCbOm', 'iUSOTsdMbUqCJHooYxhZFVxtFUNYTA', 'UHjQuQCvTePgTmTJeIvHudztItYXtgNOARpDJahCBdUAOBswgwyAx', 'YyEKIugHmmiCXZQQJzTJYBjeQYIiFRTNxonCy', 'BcZFqpEaSkrsLPXQpvEjwckLFEcrkqmMFaVGKBOuFHhiHjohYXoDvKHCDDBhhVNkchxGojSxDFICYtSRzHKRasJFWr', 'QgmxuCwiXpBXCeYrbGVFHuIjTfoSgOAPIIpFtFIGpghuFEhEAfqJxYlySyFbDOkoqbUkmqxFlhnzDlLdiQlSwAfgFMRoUKqOcXNpSboKgcpbrcmJE', 'effFCzsMLPwysvUOQNHumQDDcQiVuyhZndezQLXyz', 'ULwxFJCErGozAJcATIFcuvdchjYBFDwdBGuKmOIwohDgUdXlVQUqRmBFlLgubhuQfUcKJECDRwdXdvTXvurgqcefopMYQSVqqoNFGbsDUYaJKA', 'LLuDKGEFkhGkNlLhuxxGtEsseixBKRZLHCxhiqCyRqGisWOOGQJxgAvTfGsBpxmVtWxhLS', 'AakanhzuGLUnWmWvGTbxhwLPgqMJsAzynyjqvYfVtTlqkWRztcSkjryQVqEAEoGVtHVJVzBbdpSyEkwqgfsILpUYSdgxnEunKFM', 'PSGtRYkkgYZgKTsGNOdzjolNbYjVYsTCJRJYUiHbGKNqyqmGQeLUhfIPHyTwwyJcGba', 'mYYCxmgYsRrRaXCovGZBAfpWwBRirut', 'kWkIdXKKYpdTGosMdkjVXmEjGhKSQhltlXAQapatuaRAgzeJtlUDnUHpcphaDLLrCzlrfrjhURQYrlnJyjHRAAMRevrumfOGfGQwYmMVzWpQUKm', 'RMLYMHDfcmTdvocizRHrLVJfEaCcqaAVEWVIePjrVIMJvhIZErFlitYwFXXXouKRaPfRiODAnf', 'lpcJhSxiRrLlSwmWASpunNxfNVTtvmGwrMehBpgaquudutxvbEKyPF', 'IfSMQTyHoeLZGswOOFCcOsEIWTByXswjvfEUBaFvUcloyWkwUDLOeTrpSiEbMFcdjcotp', 'JHxPRkikmQLGljhKdimMDYQEGzQVjvKWLpBXaIJSynUpoFKeKYtWuKHiwccPdCnxFkcugnGjJQiHFoYAebatqsuxGqzivdcuQQs', 'YWBdAWiDAiTaiFXLnIJSGpfQmqPvGEwkYfCOBneWFytNkPBtNxn', 'CnrswlMBUtwBT', 'tVMEWBpumtrpXcNXDfzMYfIHBWweNFOLjOpklKQuezLithMvya', 'ozVEYmiZUTsBpVXKxiakQWhfLwQKYgmRexizjwUBzyvxIBKyOELeXbQIUVaeSkkjpCJosV', 'IBEaEXedcQckXVJuAQzWW', 'uuLRvzSSWmAwArKOramdbRbCYcA', 'ZGVLjSvSmmjNTBGlnCqOmMZBKcaRbCnYoSVnInpyTza', 'errriBPhMTOPVPPoZgiLVvJpbCodiPkgySsoVHvPpoOzLvReSlbGnmNKkeAZeuHmFKiqrTJBjoYcPvRuDMwFlYTMa', 'ospBOuStGQihwgRLFKDigyHiOTRfBtUqELvXnjezEMsgjiCRHvnvMwqIGkMuRQewMrJTjRhQPDrvKYigIHoyAAdIFk', 'ZPaygYLirKgsWgaNqQOGGXgFAltFjWzZoxHUvklZwPSyIZZiiJxTEAuRKjbWfJyuqRZ', 'aKeZzQrNZhulfwttWiKDJcUSqzhcRmBfySMUpRTKxMOvRmXmYSMDckWNsTcUAyJjqCGyCnyGQwqhBmoeJlAWsoWXgdzQcy', 'jPLJGnpRFkLjTyDapZHirxtgR', 'FYbmVyqJHDTdJoPvHKzdCWHwaFTvmRYxRqSDNlvpLiLaDyhHbSDVaJQdSBOHKzDlUBPvbTiatyXqRsfMByuylLOkfumrWQyhSAQyfkODdUgrLGqcaip', 'THdRgqtQpRUByFnEBpK', 
'PISKqJUnaelUisquoQINflRXzPzHvMDnIFLMZmlKpDKTmpXfeJoKINPxvjCBjkgTXxlThmozrssJKZWqPssJGzjzeFabaFeeRUtuXduqORHEHkpyKLr', 'yQhdaXZuRYrEeGPwhcRIysbipvdJddRTaiNyNDFBuCkQIAcGOCJiAbMkwGyjgLUkNAJdTAgBKGZbwsLGPGJoDJewkwDkbOQjZQVFbPx', 'AfDMJEuQF', 'kupfSWfeAqKPYxg', 'YHWnOoBdrenwXKlwgczJskAEOMiuPUFnihtQZooPBR', 'BdBoahTspSRVshhPKoCurlwrEXsGqOFXQcGUDQBxWuyIPAqVKiSDHjRToWwaRpQsyosTZiXJqIeLayH', 'tVMYWvJKXYmnkFltgHbDRdrMZfEkvTgrvcMgGIHCWVvOLZQTlAZgbBSmPTcZqgffyOpkODSFulvNmoQStxnQCWLQNRTjackTnn', 'yKCuvhcRHzlhiakUWrIBDbn', 'OUJdBazJNJNsfNEUCXpxFPUCWPzAKRQcXOcOTlSGCaDyxvcHJsdFuGIkfDUjImOoTshvxFymFlHtPVN', 'cPaBIcnvRAisLzGposCxYXnYRLQJEeTLFjlQQypXoIKgNeCLWBXNiIZLy', 'AuJwKymXHeXeHuUVdtKLghbPNDzhudGwIHDVRssnfPVlfDdtKfBMEJLosiaZuHwskLMteSLHAemCEWhBzPsqSVRjeuIkYJ', 'qYyNMLaWmeXIhSKvcOBeC', 'jYnJONxqufVPAOtPEAwHvCgdBoKfFenwemFzvvjgvGAUKyzjbAuACYUemhXnTaraPrdbSLUVRkzWpZKIyOMEWZeWptZgl', 'uNMQDFJNTzTLaGayWTrOqlyOEsMBlFYRiIoSUMMGHxcYUlNrgFhtwEgcaSWwvWrsmNHyJseqXbVsGtDDBBNLhmLsjbBMJQrFVjnAUQzLcKWbx', 'DYoZDdSzCnAsmJlPtgnkGSyYYcmeXydIqkLzrcBPvACljwdMhIRNgShHdgaBHUAzqGpQwPRBuWcOCPaYq', 'qVetNNXbYHIhGTxLSDygkGJoYihaqWaoWHDyAgdLFIQXRDjnbTUSQNykvqXNLsVyUlLzeBHbyWsvRdNbeoZqBIOfiUbXdUCLfSU', 'nmegUGJkNR', 'lVOOKocDDVBANgAyrvHnjaNmbh', 'jrsRKDZgtqquVknvuD', 'DjNelJBPDMHFYYQtylQAWGEvOhbfZhjYzxtdizSCcssouMeiGHFpTMRQsNkCGOyzQdoAswnihMHtMOCcALCbA', 'CqQzCblTUgWZoSNZtLnWVNulqPndBZzwLsOdbcH', 'pCNDdfKobTQbVpJVorKblIyeRAAvYQEEDCTUxaCtjYfcKVePgvuJWsxrBQvqodJbbCngQLzRjpMUUhFzXxetEPdpMEebUYEvNe', 'W', 'VxfKIpFKahllTVpBsdnxqqSfvWeytYPSmYRoLyOiVnchmFtTaRFgHRcJFEwdoLZblCmnPafegTBlKTDsfVNYBthsMYMDjMjJXqhXMVtvs', 'cSngrygFycgHLUrmuhxMBteVc', 'WSPFJgmLMjUChSsZuTtmNUOanJyPtcGUpyuMCymVOernycCyeUBfZZjoiHDUrsokOzQOAyeOoekorbhbkLFXQNcjAs', 'nnfVKiPQCneuoExpepAqbeQneQhQBizIGLSLShhmkezmCbcarKvCezCrvtL', 'tNoOhBnkYuuadNCsEcQLTYvzfdUGXPqkokxdZgwlMjruXSxDZbdnumBANwqzYtLELtdYaZfXBeHJIsSSKq', 'wRslcLlChcRpOEjdagykZLPgMFORYmNuozPYogbruKkWzvGEQBbLaKvvgdeMubGhuxbBHaGsZC', 'syLcGoSEUTxNqmukBfxfwemhZAORTnNJtqsDYBJoKqKEcMCxCAOywmaxyMxhhSFWBzSOfoYbT', 'CcKiRsClKBPTkIMMwCFvemlVhLQjPvuaQLwDXRorDXTDIAtOGfVzbvlazEloAIGHWfWOYFuZByHiToJazxIhVpX', 'dkWNlIYBMhaFaE', 'OGTDOHzUvmPlP', 'CUZdXWQakDoPdKkcrcFpbqfHsRTBGaotgSEnMEEyvMDFeEJiLeeZWNuHbumHouOjMuAuEOGPZQgjpYQxUnbtuyLuQTbIogM', 'wLilesRRHsAkvQpImrROhCxuHQIfiGykkbvgKlanKotkesRvhfxTnNFJLIyCRCybaNGVXkEmmkIpq', 'hhUxdfWCzctxKzVxFghGNZgkgHyaFhwdMEFRrIPdpHiAVUycWBzaLRILhWrhdyDVmTECoghmmSnBMuBPXSBtJcBfs', 'QIQDmmXUmKGEwCnAaFkuzEnOmJcWsmfGdvNwxLDutqJYVMkrxtHkMtgbmhDaxlFnIeRRMpORo', 'JQBsMJtYewkqchyIsEvPPSgoFlyFdI', 'lUCuXHCEJzrahuLLdynJqXKkE', 'ETPqkGxvIysSuhEhleTUJZVkdFkOnDQKAegUblpDDzIByEMOePkWAycCDmocxBCtpxUJTfAPQXuyHrBRngetMJjSSPR', 'dQXBgmPnMgIYsrBFInDIdVTtWIsahsbcNDCFMMGWbGHNbtIaVBFHFwlXMEykXuZRszPVVsXZgEUGCcUGurvTnyXzevtJEMVgCIipWTwrj', 'QS', 'eVpFzsWWyzNVuvivWVpuOAGfqMkVFFobSxtgbcWRRASOdQGLFbKLPyNGxrBjvlhUxcOkdszyPadSWDvLxGjMOQtYZIKCNvgXRqqfyooWfKuWSPQHL', 'gSMKQfZCcpfPbMFEEWYEeLZ', 'ZyKYKhMBNXgnuXcVUCprQecbrqXpUVxokCBgsqDqqCjiGheZVoVyFvbkMEncqhcYLKOJyH', 'SLagrIGMTSKSjWEyatJDDWZDXMRdbNPFeUWUFaWMEsGBzyhdvfjXXXzdeZUmFlGuZHjHElFLCEnldSj', 'QidejdRTXmLqgCAZSMBingMyzlunukqspqgbDyrcHosgL', 'ynxvUELmDrvhMUoaBWDVozymVotKvKLkAwkntOQyQE', 'dbinMAWngQpvwdXNPnFFluIgxJVkmCrEfRKCCrAniRgpkidPJQekiqsnyvzjXweCNxFNzpJaYbFnMNQJwGheZRCsAftWcxTRtnTZnyNZDdntflanAYDDOx', 'rUIsbHpjeTOMxJXaKKNWWDdHLRmGptdcRwWaWlijheKGrXsKYBgcQDuIBoOqqEPcL', 'gzZcamtQTTEBQtwNSXzisSSOFzyFLtupTygXzgKFJKDMFAVGbrqJuZimzyywIESVWjR', 'pSEIUwELdpCkmtZzdyTUaUtMLJdJIOIZwGqhkxKebduzeRclPkCnADHcPMPQiqbkSQCWjzMvShqtklWLcskYDZNgWOdRKZEIa', 
'IRPlOiNZnfOgvwSFytOlEExCTqaUBcZjyFYRPUfMoMUAZaSVSasTDHHHBBteOlzaueRTwI', 'FaGDAfLmjJWiSjMIgYInVLXEXHFHibkknqPfgQiuxkwhJJK', 'ccOLdDmtBChIZDgMMwareyfPnuyuqR', 'SGROSNcaljgxRdfNjPXlMHPxgFGuZicRclfAAcuHkvMKudbGFKzREbyQgqrEIjNNCEUOXbvpwBjlLrpOYUZGLtONdelxjStRPHI', 'qfKcOcdvRSOiqnQKjSukaPrHnPtorcLHtiFOApVPKVnkXYFJsCCCBNIRzAtIPpRoEwrKLMdMNvzZMtnYRIUHGOixkLhshiUQlEXkijwVV', 'oDHnTcmzYmWIDixOqNsYkALKzOGitnCWCmqYNbbPyMT', 'XMCaXqiKZSQhOPZdKRrLnvYjWMAwLzuqmfJgSKCxxajZeeqvxqffijWSRBvlyzHHYBHdyrBybjEGXbrNTsFeGBLxYzyptoKEbMYpDhVJCcOjIejyYnlKrd', 'UIiYJHzOHwBRfVnPzmUCSMbXwNjpABPIevjPWcCwakxotFCnwA', 'ylUVewHEGJdpVHuEWNwkGOzjc', 'n', 'rsWmtalygracqOPuHSQjaqibxJYGTAWtvyuEPtVeDzdXxa', 'XhPuEHPXIpEJnEfbbXMSvcuzgBTSnpdLwReEMvVYyvyhCAmaBTmOSmppxSkTtilwUpbnH', 'KZTtcfELjmWRQUvqAMgGgVacADtSajvSiCUgOPHtA', 'rNWJMEVXjIPOOHdrwcVBaw', 'LeDQvFxtfRdDlqDgsOBNsfKCMUyhTBFHhbzFRevsLmuFQhpaIeiEuqvPIpjXLAgxbWmyazyMpxMpSQbjeYnIvONumvZDndMcnjUwqzjaA', 'CLKylTfWFTeEPIhMroOjpxbRNQqZREObBabAMSYqhoWlMsgCnQFjxRkAvvsTnUJRqXlpvfhWptLuJOkrcvPQKAyYaqSdCLzKigN', 'TqPKsHUyUCKAPseIPWrQoSGtqhejjPDtdhaIyMrOZJpKlCUtGFULrlzabrVHOfM', 'ieTcTTCGKSrzwUlPhKsSUYXKtxTbswZxaOPUBSnbRpcPmALssMgaBEXSddWQcswrurAWDhznvKbWvQGhcjjMotKvEJDro', 'dfzHFYELS', 'hHqVHiwiPPUgyBQaBEkCBVCVrEgvcdxXBR', 'ohlJOaxwHZVcbWBZCYmNJQHwKALZBurSlanGDsqrixsebGYuFnIWEWXvcvwRnmIlmOymAvSknvGsj', 'lFvSfKoPeFlNgVs', 'PKgNbuyZURLTSVAHBeIaNdnMZigJHztfhSI', 'WPXbmGGdWBfBfABIbfgIiexUSpLONacUzQbIaOnImUdYhtHaHEImKktLhVpwtbiJwodOPiTEfccebdfGykQjaNMSY', 'VmQtLCpzOXarhYQBWEtkkyvLzUSVGqxmHMwdJUCmcBXDfWHCMnrPyffUNoiQKrHTlUNpEMVAILdurRQBoXhYHX', 'mTiffRckwcMld', 'ZeWQphkiAkodrGOEHtuxdZQvftSycSlcYtqakrVJaAahTqTNEKEZWiomsqUfMKeIIrJcbUVqXJRxuGoTeAkZrZV', 'wrxqtfrMwiPEKMmgUgiHQbWMDWvRzTbJzNTflYWhDwAHZsHJcBksElXJwHDoRSDRkrASQ', 'LqsMmgBgTFlCAYSAzeqgCVhiUNyjrmlJMFNOGnwNNIByINQOcStbkcDeZGsqJozogLdJMJlJvXUCelXkrgwbzHeayGqfTrnZqke', 'VbaUrgmzxKyUpZprmTnWgkThXhgGorSKbnbfVLyGGsZadqlMLlWLhLXcTwzNTxNbdvpvmAoxnLIPjhokbPVHbkTsLcODmITQJFpOVqmcgv', 'OHIVKJjeCXidQgGqqBzRHanJIwaczkKxFruLcjeFcacCwrIWgmTNnniXdFyuQtiKzaWvNHuiUjiGvUEfpZZnJVpnFr', 'VkWxTDyIkgPkzuBdObBdATKYtmAEGAHqIfCFPzvMnxkfOEnuVgvoGEAXYnGIzXYUpZuVRfFzUEQHdfDTBmuuArEnu', 'mUdhHGKJTiuHbWLNygQoWfkoShJWnRhkYKylrjmTkZMgDThvgMiNtoKWeRIJLoxjRaZOlaQwNvsmlswBnZiCHfZrvwUbBg', 'kskzzossGdFMKNaRADcaSXJxrlsKpZfrPbsiFKPBAiGKiSRJxcLXW', 'UvaWeSvVRyjFnmRxrJidVUFegQGSxjgtyWoStggyLrYalkgDhnjNgEuGMQAPUwnWqRd', 'TpuRuDTFxGchPNoNudyfEaPzFMZQiuRtakfkSwOZvROAvQNCDiAxoVTZxogFRsTGWFhdoCOJJbntqqahXxqMDZbDWHtPhefUZG', 'oCQAKovtp', 'OCtpdfcRtpxdZfSTUiqfqHrcXKzCASnMsewdzieUJDhiJSUnmWsCxZCvwoxszyGJcbIHOMomvVtpqMtzdnYGLhQgfpjUKaKIQYdnmQbLSHRgGqMsYN', 'txiVCMeByEnfcYhGm', 'UIZosHwFhWyFSUrjEFFhA', 'JDsaZKb', 'RcvsQxePppVZUrlwvecTCnHxFwppuQOIagZljMvPgiUCgnOVFNExqSsvmiwKUDzzbdS', 'lwSIu', 'gMYiUXIpxNdKXY', 'QbOzJnYuBSayvBJXIwcUqRTthxaQhHJLMlFjUkeBnJIXKRSmqRWTQMXalOpJGPtkvqJyxTzjUirxYrUDRcoTtgipHpRwgFhGleUQyHpzHs', 'fODLmCxoLBxWB', 'xBqjZSzqwnjfVFvhsGGzjCQyQQwKUvOqeTUDYESuKhPMITLXBrKFDHLIp', 'oBDpbWEOCssQEoZrGtGnqEzRnXkfSvoPHQswgBAdQlwYuhMpgppWiWblrVeVvtbyaOzAhsztPGhEIHeaaLFxUNzJZJjnoP', 'IDFBaxIJzMsLzJCUbWsskYtOJbLpRoJUerJKJHKlfUfAFvuoJdUpzYBaWtOPaxvXMovBJT', 'pwqeqaOnfXpZaDoMerFdPFZhTVXFwDTdSvanBcikmhukCKRxVCfgoaRxTUhCiWuDYQIYcpOKelL', 'qOeQlyWfEjXmiJeVZRgAOYWTnYelNgxprjQvcBCrdTfIMJKzQoUKbvxtMuDawcLvzKpEHQahNvf', 'SMtLMHzbMMOdpzRqkPLuxqpVgorhHeESVkKUDWinxUBjmPMcJZ', 'APdCiuxDdHTQPqsGLQmsYVhgLiWyVrryuqtxjRDivpeGUTKFeWBUq', 'RmaqdxMNnSNVmXKkCnBakYahdbJBNuBipAxBOlKSeNQXGmLfGLMqqFDfbozIeEidzxsDFGOEQyVcGLhHs', 'ULWUkxisDeSrFdjmuBKKwwxayOHMRMQ', 
'lJzKAzBDINbcDvBEXxEAXQuKAqjRLRBgnGvivihxtAJxwTHJqTEAZvPpsnhGijXFSEaogPQDNsv', 'VnAzpCLFBbzAiAzyfrCFKjOkWbGpMYGnVMDrHRKWKCiifGqyUfXIJIakmQxTNv', 'SAtNTCL', 'cdmnONHLJXTxsrHsCbmCESexmdNDbjNXUoOgcnGTsPqozCgDFSSCopdiBdVzGZmkTOdozZJvHwLteircCqnDyNJhZaGUo', 'uNEqQUwlOsPmSACyiGKZGHmPxsaQieWoMeKTsjSvBX', 'JXKYmRKDYkmjV', 'gGdcNcxgEXKkl', 'HxORXqtFUIphjt', 'AwqSgUrszcTqCbRKnOCRWcWGwLPjeBUIMJfMunY', 'IRreRLNUQIigXZnvmvNriaRsEreEGetUQTMMznFBXDUsuJaSCdAUOJbewZtNZ', 'AmTRyeGiGrqlPuaPWoWcKCncAGxlpgCeoFUtM', 'CiaZHnQgeQUTUZrbCiyIJKlLWBOKurGrSgkBaQ', 'cyxUgWSdiceQtYFHxQEFuqzbEMrvuFPlFMSoMQsBCVKZSgIWGVnXTLHxIezTrwMOkBWgEzsxtKRcmSrfk', 'ArnpFzJUanPBlbZ', 'soxCUTYkerxntwiOEwOVPBLVzYJglfJqcPaxhUptoinxSizRZuaDSncNVlKZUPyHNJpoYHpGNWyYoSlPVSPigdwWbIskbGkAnZ', 'OvLiDmrZaNjoSodEDUnsPgWjNUQopkHuCaAgbZUZpteeDEl', 'RGeeZzudYNcDrgAipXFamLmBGKIkvjmLWzzWMIZIvoYbKKoPSBhWxivfthXwrEAgdLnBaMXASVNUqUoMSFJLdTcwrWGZnrpMRO', 'YGgDPvAYlEwChWaOrFDkiPyVXPcmjkhuBAXHMdUTpqYBidaNBDgGpDYmhNiVaBJUISUpZNaBZ', 'VmQwGUbqYHhNIHgsoEkjYWOkJIXZeJAfAhasiEBXZYoRONJCmpewMgslcPJAVLAYmMVFBpTJYPBSME', 'MShQKaFBIcJANbBTERRbFcliPQkNdSULdePhcngjuTiawayrGdRTIBEFKSBlQmkLHiQNzQfCKJgMLbsDsVYgwlYFyFnjfpIFJshJbr', 'XJzbgjmuNefyzZWEQCNWCUhLsBWVWoOXHvXWfsuYnvBoFoqpgqqcOvGsNjuYzUTyqcfJgjqAZWCzWTjHsarDuUzrIyTWZlrBXKkoshLYm', 'ydmMMRRUpNb', 'CLVaqMsWXrZTLbKRqvwJjMSsOszJdejsuotNIZkFxWBxHsPYraNasVNBtucXMOdpBsqpRxTqluXfZqNRPnUTeliTcCUPCrxUHR', 'YBaBBAMtmZJgXVmEIhgBCsDNlNBniCcslOGPVXNNWMaDLFcgzjGjkadYNVyfWIclkhugtpO', 'IRJrbRMhxkieVoxFroP', 'YlDXmwMIxWSvIMuVjkpjdyLAmSTnUEkHNPANmFGuJyKDRBiSKZmMhpsaXNpILdnYR', 'NhTgpdQ', 'fduPORVmrvBFFMJUcquPuNHhcsGEeVJZkBIfhJpIbnfLJFmViJOrVOEySa', 'uYdrEPjKpCZZZkiBEwDSQqhuyrMFcPKTHpOqBtxkeIiiXsATXzRwCBWMsE', 'TrSJozFlPMfquXcAWJiijjDRSrcpTMOnCVVensfaMqRjQKfrBEqYhYIQKWDcJsbVimnGrJJqmtHcsYiXdIXDuAm', 'yQVLEToZWDsGuEHPPiUMoSWstrNvFqAOGSvDVMWvs', 'RTBFmDmEaPbSJtHyBUurENRmSzqdQHdftnIPqpQRDSguwZaBBkOgnmLyyqlTtiIkWWdZltNoBkWuLzx', 'XxcNDBKcFNHwQsZbcscpXCYVejmZfzvSdpTblA', 'UElQLEaaInsTyWkMjUVmeoevfcTqzdlRYZMjxTOOyZeHTDRZNqHQOWkilcACORxgTrkEmGFsvCBiGgKVR', 'CvulcbUhzPCJEADrhSFoUlFqhCa', 'tZowLwCekzlnIDYHKcXXEDvqPLcsnIoBRwgdfeYvWpuqeJmZlkYGvPr', 'CvksGXzXyFUbroqqdWMxEpaYrUpEQMVstFthvWObVpZgfoNMasnhXCbvtkJkozZlyhLWGcfRQ', 'GcibekWFPtpQPDdFmALCoPBwsjKKSZQkHq', 'KBuACgazmPHdkmUoGDTEZfEPSGQwRiLxyugqolqoUYFdTpWtAmQHbxLgfSmvhOcyusVXKxNyGHFzKHzVsLgEdLSzYsQTKvJmysOEgjZOkPvgsaZxoQQYEWz', 'TmDbMyqdaxlpmMvsoSrLGBqRglCBuIcmwWBSvP', 'EyiVMgtcZrJqsTFlsUsjebbQQhafCxuScLwNZhsARrpabKnublcKImlCfTrIeaPLmBdPKxTEIYhPJMtbMmfZbeubzouVlAanBwehyYo', 'oQwRWKxrJehYUtcLSGDMBMZsyrrLEvUAoORqDDzkkRPKOTYiGMpKjakCGYnwYetRUdbCqNbDOF', 'OCYvkjwsMeVXKuTqvztjUDsNyOFfBlusFOpYVjWGYZoGApcsZCmNvhIqJBzbCNYkzmwTcIgMpWqAyC', 'qbDjZFzyHulvOFNXvlYcrMvjoRbYVhFFPqotkugeQDpBckYghSIVoKvjDilowIkspojJEQqAy', 'tTs', 'EcLUSSuNXtGclRZlludQTKHgsgdOmqpDtNRuIlJnWGcnyIaUbGndXKoSQvvoqZYaOixIBZqRGSHobrRuRCAFWpYnERIrVIpBZJAcKmXgkLlpTNF', 'NitTmKujEFpZIlmiorRPsbCVnNJyzpClBfiInOLMTkeSljkIAqWHOjAQquNzwewcbOIhcxRXTiSxgFzEZNhUzBoJYRzOPFPetYsIGbGdYXhOmWqwOL', 'hPUpjmarYgHjFcussTWCgwhEcyZLVEDanTkgahIhrBYinig', 'bVJywzkJSoWNNSfqmtRzMqNgTFQxtWFreyIZPKeIWiyTIHoAobyWRSbpChtJJxIKsXlTskfsStmVnUtPumpJVpzcKSDdGQLgngBmaSUqibRNYQnzBEJ', 'McXEoLHKaQTqjGtwy', 'PjUYmF', 'aJWAiDhKkvVlhrHwAWRIAsLSKbJgfoFikynCTWvrjSCrArrAKZwljKwCKVAbdtitAeWgckmnatbaJmiWPqvcZsJruXXpTKKYYGqCCkHfSkXvHDXlctsFPgS', 'DMihpjnldAjugvDzQeYNHKMPMLDQjcPAhyDpbTvqeWFMXmBCWB', 'gwXfFJOeNbqPCbvloeHusMv', 'wQAySToafpQJegxMJGGLvlAwJHWffjafYwcVzBlezW', 'OUyItoXLLyqgWIQARymuLhpwZtlWjCZlaJdQgCg', 'NrEZHRmnsOcWL', 'wnWzAfIMHsVWerniNmcwjLgxFh', 
'hupaHHqyccMYNNEBnGUNpRoZSUXTcjrsgiafMUwsiB', 'SMAqvhgGJisGgjpvCouBGUmAZhRldJbnPuZqNtmBjZnlCFuxXUvHriVoMAEEWXoGchXHPByjDOYLVGJdPzjCzxZvsQDwkJfSaaBqoaXzGzmetbktelusH', 'LLOZzuhwQqNbDBKReGyOKJGVOUrWZXLXZITxH', 'otKzmdokkLyxiXOUELhJhqHxfeiqeGvpkVWQgawyDWmVTbLLXSjyIQNfUoUZGMOBpuOXZbUJkgdxgPTXIhFBFBxEFBxUym', 'jorbSFeJmWwshafPZJpbuhRmykPTzrcNtWgCMmfqgQRVpDymsXaNvKV', 'VMViCbDpWZlBJvBdbCfpqTzAwYLZSosSsgmzPrgIvsIByciNERFykzwtGjBhwscxNzWsrcHruESgzAxKMuGTklAEfYGjnohFwzamR', 'zYsBrUtpToXzYuRZiTCYACbYAumLsxhANszZLZYvjVqWWopKKBVyPoMAUHMIj', 'PXOSJgvNzFuStxFdOlKcfWjAycNcyF', 'jFvwMTMAXEuLxZUIWeivnYdqgBmUAJvAWtimnUPpgPxTcCmUrjH', 'UDCLGFKlhfAHYcLpDsnPAGX', 'EJPJkaPYldPoYclhWrGWkB', 'ONYthQktKVIIckjFyBKGbcHgmWmYVgoAWkqDVGukUgDRiiVtjxFFAEMgkCNfRBPRVlpqFsxojxEdpQ', 'Cadopu', 'AIsgjvXVHztwWv', 'GGzqpwLYtoKbebniNZWxQpATJHMAVUwGwZtewLsfdQwFgsARNcHxbGCIMXzwQmLsZUaBFGFWjxvoSgApndMwKCAGUl', 'axistFFjpldOAdPzArIeJpoMBQoICSqABXVIDefqpvbXgvvjtNvkXDZuHFFQSTlqZRt', 'OERbxpkpmcirCUENkYTcivIseDjQrcfuioBZyAxRPiNoFsLTipsxtDseMxbQYdBVMyN', 'dsznErcjwxrHxeZweNQEEICiXzTGPakXDxoksRA', 'vDNQurtBnaUvRCZXrvLjvyWyDZCqqSLsaKFMagWlraDKasMGqjAmAVlogiMoeALpiPLFQoEWaiGIIrJRZgkTBz', 'VAZxwsPwcZohgkTvdKejHCVimmUodQYhGQcrfmayYczwOAvXfAnAKsxbjEUeyrsgowanPTEcdRzzRsgfbhsOBakuXAFLy', 'QgVwFRuAwgtcMlvrTnRsE', 'QYxDRENcmHDwDKENZZUPcrkAhdGSpVzpjoqCBpFBWsNAyjIrZexOAlMzOPrAOUwcioRjuFKmnhZATnIzijr', 'uTGNadYSKFkkzqFgnDcdwnaoZSLiLOSroIUBeSMrmpFdKkKafYJbQdPwTKUSuHQfDWmoampOOAYqlBSqubBymR', 'kSTNymLWnLezTBxDoDYoRZvKCWRuPBxtBCNOuTmlOZkXhflVUnRTsAdBAdCTqtVQY', 'yHnxXE', 'lQgrnHboTgFGqDzVwfFguyMEzeTuDtZbicvUrkjLHSMglKqbeLaERbeIQiopuHlPraiAwAl', 'uDGFLkPCcuCCrhBqIiJPTgpwMukGIBNRBisRcSSxdadwFUrWFguyuBR', 'BRoKUvOXhGnKmuPKjSNWGOqYnuRPhiFhkFQrVGytFMLtOqDaGLsgArKDO', 'fANDWWiFiAQDHiUwZjAoQkPGZWSEB', 'iVwlYnNIAMeqfGuKGFZSbyhVLfbdLGAJQfFzCfvXwGoizhFSnNPPrrvJQNNXNlBUxUOlyfYoHwLJmpNYecFMkcKOYcRzZlCHaaMNLxOojRXKd', 'DJBCz', 'lMAqWbPJrARTItORqSKprcdeQycrqsQixPCfssQlCSCfnOyBfMquBu', 'SHcGNeQmCBolfrmlGVjWHDMfZckfLKBypceFxZbnoDOMVAASjtmqWuqddFYTNQEEiYBsGjtuYGEilRcrGQrkiSnXejkenaOejI', 'RnmQgOtpMqPGUW', 'AMvJbtuBJ', 'RkHHhjTSAUPvVIvHrjnGfuxIUJTaScLAVeKoHGgMmufbHGoANkKIohdeVLTmpxzZnJxtzNcNAYiWcSjxuVNyeQzLie', 'npyxEAEoatLjVlOEdatVehIEpLZgnvoRQZvaoxsZknEghvUQtkOwIJbLCZvBZHdyyPUrxBU', 'VdClIUxnalcIvxBYrwoLOtg', 'sNhKiRfdUZwFRIEKWSlVhjWIkzcxfsXPYAQQjvMjggJuOKtNXSNMtWQmKifLmgxbkOtatJfDPLfOHSwzzU', 'CvpGQyvXRkMkvHfRrwTBoEawOUKdUufKvzPPovlPtcBIxJjXsKnCerQDjouukknWfT', 'DKwCPFyqzpkPlFpHDcgbHYwkvHymLnBVSdYyLnEFfhwbA', 'jLODLtBxrsvezhWKXEpGZeCTslsMOuVghJgIMaXmvMlJPVhcOtSJzpiwvLUUcgwcR', 'TH', 'csqiPFzptkasuhIeQ', 'MXBlRIdCC', 'tFTHRng', 'tY', 'bVboBByCYjLmFPBlkVBSpetKwVPLQVGPrQJPpmmxvxbhClbSBpC', 'QRMPCdJPZsPfXqXGFBtzRNX', 'OLdaIdQYrTjuYrOEmfPEoziVnJSBdaYrsnA', 'sKArEnLdfKMhlwcVztuSiIZcApjSdgSefSbvzcOHaGcBLXVAXQTGZQOhixyJrmKOEFmZPAGvwKtSrwKFrhwFpfoyXAFalDHigxOdhVUWzq', 'mIdDGxuewMQhcaUOqOhigQHysMsApOisOzavLJsjqLQXZgKOMSsMcbrRHwRXCGvjXergzHXbc', 'QLMBEJzQiqUObkhkdyoIroXyxqbQaCgvhtzTTNKQQTrlZhlJZhvkhwhnaVrRS', 'HCVAiCdbSMpGLDDUYkEbnfGYTELuYwAYADtUsSJyXtXkcrFVSURkZFVtAvgGdsniquqoNKuFrhPBTnxrwyYBRnhKKPPToWmIPW', 'GRSSUNsUGSPyWovLUTqMYdbANGhdHloXREmzrzogkjgwexVmuduGWKUHWnMHBWzTnujKBzcaGLvlghALvtpsyJWrcZJSlVDQkkGRhqstKFSzOKwfu', 'dsEYoVdRnBnQfOnoJIbkUTNXyrLeTNuSjGsnIvVWLedHdLLuydDymtpoZyhrMoUVIu', 'hlT', 'VbUVhvpbWIEEoAWHpRAUv', 'cMjacxygzWTWOfY', 'EbJAZrGvuvIxYFbJO', 'vIePUlFVICpMbxacCxpkiQJJoIKYaDHHHHbhFTEuhfHPGCrfoUtHklu', 'KukxNTxfXgdueUwLjtsMqJMhYHTtqjoHuLhiNVMzADAHGewMCWHzZjjcbYWwxD', 
'sNwCphrhykxeLaHgSqRJQjiLZRSMSTOTFmfWmFUDepQeDauGGQaWzXSvXGBKxvEhQTOZpRNERYFKAThmhfpxvDlhRqdfZCtGXlAak', 'CwjtieckWNdtJHTSmqhPOEIDrsilwJWxPuOSQIaRXgAmxMnqhbNXoEuNaSnnRiFZzTHrVZNqXNNUsqnYoxDEVeoPIiJEWlfYhhGjniDnYxW', 'ByayJm', 'YMRWpGygxcJFEOaqpyLxlgTvzbsBxZfBdfMfLZJqCtKncCxmzLCtPMbsjLyiFSAftNpFieybLIHIxoIipZofdaeYZiZPYEQunsFJXqVCXJwyJgViB', 'weaSCKTfvOarPQmxiHKDUsfzxieNNYkkuWpLNkaPWnqkKiUixpTPKbaWJOtlyxOcVYWMnLepNilOWXMbqnqbEtvMPgemKdJvPDpboXnhVa', 'hMrMnDrBZdMGdgfvElBWneRLdtEaSXhSZETfbhDdnykhicbFu', 'BgxJtvdRqeGKJPZAWCdvAZEIk', 'ztbiUkFXSPSiklVrdFTMhjgInpUZKOLzNriVDGRnnxPHYaGGEBh', 'RhJJHGLWfyAkmlJQlkcwLNZAEUCtbastuZiZxecSGDWdSdTuTPPeUsXwdVQgraHOmtqNCIEcKSEFLOM', 'lAZMFfHOqOtcDyUamERlNoByFZPsafoVmBvEWiHrfwzZzkZYnbVBjdwPudqRRRsKGGMUWpnRpjQapIAOamXUNejaNoktudx'], 'y': [1, 1, 1, 1, 1, 1488, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1]}, 'data_type_dist': {'Categorical': 564}, 'data_subtype_dist': {'Binary Category': 564}, 'column': 'IuXFoKpu', 'empty_cells': 186, 'empty_percentage': 24.8, 'nr_duplicates': 564, 'duplicates_percentage': 75.2, 'c': 0, 'empty_cells_score': 0.248, 'data_type_distribution_score': 0.248, 'similarities': [('oZUxhCSA', 0.0), ('xkCaitlB', 0.0), ('cxWEVwECy', 0.0), ('uvZdEBLZ', 0.0), ('ZpDsCbso', 0.0), ('JcWtsFNEUi', 0.0)], 'similarity_score': 0.0, 'most_similar_column_name': 'oZUxhCSA', 'bucket_probabilities': {'pvOhgCgELkaaMBqkgSDOcOIBYlLClfrIKiRweXkAQVFeolLfRxIvKMexFXbUlDbiZumXFGBkSloqLb': 0.0001666944490748458, 'awTrTjOVNHsVmDYmGYtHJOixvMPTVoIXPIGKvSLjAqbHwJsZDXOITwBsWmRwYCIwMALNueFXObBarmMjvnUvRHvfwBZAQ': 0.0001666944490748458, 'OgJDLfrqlfOYVL': 0.0001666944490748458, 'DuZeGlhxDtvLWEYPtykPDtQPjrDXLYQgxRsHvGIWIbMlyEFhNoZkDI': 0.0001666944490748458, 'LckDmodLHQCZCAinCfnCNmWPOwKfgaBwlkfPvKNsPRdHflLwYrbFWiJbeDNsjugROeAvYZsUIaY': 0.0001666944490748458, None: 0.24804134022337057, 'ZETmgXBJgqomZWmZsKfKLutUSyONCuwktpfyJpULzVkBNPToLfDjADeOHFbJIKBWpfj': 0.0001666944490748458, 'XWx': 0.0001666944490748458, 'bIwabRIgdhUMcruyLWaeOYGfGLHsuWLfgIVAgeXLuWkLFeQnSAvsPTSfsIeVndjsqpOOcEcgLpkKkgAwmgllmYgyFHBnc': 0.0001666944490748458, 'wpOQCOIeePsRRsHfWUBqtGEfssLXzkWprYCyWWnVaYWTjBVwzwIYGLlksvUaXeLBwsHLzGaGENCHvApXrXwXlmDdcEtbhLQSPaSoONmGSSUzIablkVZ': 0.0001666944490748458, 'MtBSZxQZHDvbHdJMXMhfpdeoIVNtslDoeZelPj': 0.0001666944490748458, 'CndyxzErKWVyGZzBajuKUXoKbjxUuEJlqVMZFPneXSNmtJRIDcjSOxdAGsbsqPUNALvdZRwOhLvcoYfvAVhkRUUUtWjiVR': 0.0001666944490748458, 'xuyTQNmuleYVFVXuakwdOyQIRAVhVQiPwOEkiaMPRiumzPwzViVbPsLgbuytVSNQHlagqRGPPOEYTSMibecZVoWhMMjePrdgxnJqieptyTTSWsWAVi': 0.0001666944490748458, 'Kw': 0.0001666944490748458, 'DW': 0.0001666944490748458, 'CPAujwKawOeBpoHCQGdTgvNmoEbjGXCXjmDibYDOJQqydnOEbUFWdaKBAgtZAvldowbNQYgAhmpIBoPZkNy': 0.0001666944490748458, 'nRLaNoEbIXSShFafKC': 0.0001666944490748458, 'khCKdnRPuocQItfILphpLaWrrKUKXtZQjQpyJBHDWsOKPpenRoHwIBERTBiEoEyRxcuIfnlATzAeeHVcq': 0.0001666944490748458, 'u': 0.0003333888981496916, 'rEMJukiFuVPkISDhDGcLZBOJwGcvkjxKOSaCBbPhUBnJVxdOZYiJUsTADfULPNaiwFWbRPDSFsnyyQMGqUeRvDvbxqCglLkmcGkCeGDwrVFlW': 0.0001666944490748458, 'OuQbTHglbCI': 0.0001666944490748458, 'APOLDDElYcJhPPTyrnCVNmWwsMyyxjUiixEqKLSGGgWKdzjfdbiGZ': 0.0001666944490748458, 'zjLhSvJyrejSgyGKNvHAAMqRInDJAArjfuTunFOutTeKGTGCYJKhgnmHwNCPWepSWw': 0.0001666944490748458, 'EBkzQh': 0.0001666944490748458, 'KPxfmoKpsmukSEiZJNYvTLiLnGKZPBBjnwDgkQCgHDhQhQKQPbbSCQUraUkrVK': 0.0001666944490748458, 'GlwshFctFpRFhNMSNdLoscBXFwsbgjhJqnnZpYyORyCoOXCCzKHHbuBOTUHvadvVkexLld': 0.0001666944490748458, 'XuEGRJgzcRhkyXSHJLznDkFVi': 0.0001666944490748458, 'KmjSVVAtlXzzKyOJuPWZJUPTBFaPYizsiTXpUwOOLGylYuZLoaf': 0.0001666944490748458, 'E': 0.0003333888981496916, 'CPpLuBNtiWOmydKPXasAEvgipfBarvKysBupsNEfkXkypJltnBzXDNxnNYqYjabghjomImMjJNRuYUxFNWOPVQTcEbwIX': 0.0001666944490748458, 'lxcqgDGuHKsksCOrHDmfYWXVzKoeeObUBNS': 0.0001666944490748458, 'PBwRVXSqOMEEGZNwfZUEorqXXLUrzGqBiPzaZSWWTIMjCaBWWoWjYPolYBLVcBXIQCSTAwVuNy': 0.0001666944490748458, 'ganBMhOXiBwpRdQYLdAylLaXgJJBhlOCSTnlGmwRfdPuJ': 0.0001666944490748458, 'JQMPMzgdyBaOwqYvOmgsLsWjGFrWpbfyYGqyzHoOorwAQphNcfqOISGeyAEqaARNsQEDSutmdHhgAQVIgTGvYHKuWhgZnyEtraQjzGBLe': 0.0001666944490748458, 'ronjINvPvVvSrwsBzSsBKWJmZbcvicjtEw': 0.0001666944490748458, 'DsQwGRlTkZYlVBICprpjpYkdtWnrtlwbZJVlypwzwgecqKCexObGWCIJBkqDD': 0.0001666944490748458, 'zSgiDNzomFUHinynrKwkkfOZfPaxrrBnjCrHcVsiRCTnxgCJdAbaDObfsAsgRE': 0.0001666944490748458, 
'yxBRhTzyHHROYidWTqPGChcHsfjZvuYjFQadXCl': 0.0001666944490748458, 'qhmzxuyPedagfADDMBloRFjbpMOcwjzsnlrME': 0.0001666944490748458, 'IjzRvIcGcrBmNcNoNXWslFIgglRNXdMVRVKEwrYVgcmQbiDQZhePGMnWhmqXwshBMDIuGyFnSrnzvcMXsVpaAKdxSdrHEsYas': 0.0001666944490748458, 'YOZTVGThEhXUaSszguSoTySPAgIfWuqhnChYKgYVzzoDDTqKIycosaamrHMnGlFbB': 0.0001666944490748458, 'fXQtZsflJPjTkmSJdqobVdxLVnABkfFGHJsLLiUYcMjviRxnUKltiUGSSBAjvaLwTMcWpKOzYhanLiv': 0.0001666944490748458, 'CwHqvIbcukYfCBYLxpn': 0.0001666944490748458, 'ggcZlUuefojoCBImuIgCvBWwMStecCNnWtAGr': 0.0001666944490748458, 's': 0.0003333888981496916, 'UJqXxbENQDdecADiwhskOdgBvA': 0.0001666944490748458, 'HrHEDeMiMLCFbVHuIBjXlxcloEHXyEdxaQToailWpV': 0.0001666944490748458, 'ODAqKmoFBSugmWNOpSuXaiBzYndnPfHdwMpOofNdxvkTpLrlyuDACoshrJoeIaqxWLKcWPGSedtDUsKIkLFaYuYoKiWwPVJiuwxhmDGUqishzKo': 0.0001666944490748458, 'rNvoTTOIfuoXwDzBacNrClgQQnYSYpgKzzkKCypoRS': 0.0001666944490748458, 'XjbXDxpgsVgLNfeKKcZXOAYUyMeUsHNWtCqhvAwjRxRbeqqcSpfOWzIvWxbvcGZPQHjYfrcAWVNEPBWLcCUMOggHfzPtxhouleosAjFWEvsvKngDbGr': 0.0001666944490748458, 'GPnNMiEsjtqRYZzkYTUAdeviMlTIAiygPYYsCFAphNxwXJUvkksApTqyHeIvqJFsFShvWSMpasVSvpVZnBLNCQcRlPCCag': 0.0001666944490748458, 'WYQnibsLcgXonoYNhiyxJTxVzVttqqmWBSTAjYOPzjAKGJyaMKjYfdNrHG': 0.0001666944490748458, 'efNAeYDlSHnEdlaTTeQcLETxZyySdddOfwBahtdaXojOnqhCupQwpQNeqZIYvcmlLF': 0.0001666944490748458, 'EsJzroanPFPlyKAsLdQfIhTSThdKvalMVuIxrMEyznXZzRVndnjrOJgzhItQXMDGkZSMMBfXKLOtsUmFENOVOPcPBUMtWZIdfCHfzlRyhtzaOrySDQxcCE': 0.0001666944490748458, 'wcTKDWgRsrSQJhECKstImyWSKRcXNLAqjNEPeLMKBKTgq': 0.0001666944490748458, 'fOfzeJazeYYNrKGFwYZnTEIGeUQkIhjponEnUthiEusswpoUrTDrVOb': 0.0001666944490748458, 'XxLkhfvYiesraaMiRQGKwqbmVsqFFugKUIbvIgIvFsZQoHwODhRKkBeBFSUaYfKNXjhePJxyFXjSCBkeNUBSV': 0.0001666944490748458, 'bPXOXdzMisozSZptLolxIQBqgabMdmDBIrLWbpBchZNyhLwDNQXgtImBff': 0.0001666944490748458, 'HdFxaCJ': 0.0001666944490748458, 'lVplnUqpsLfIlrcphOwhHbQLLUpQwDNFArxKtmjflTKTHCaYoaXNbwKFRryDaZBWaJKiWBQluXNBTHbqAewswqAvNBSKfXCyssUQLoFfZHWAe': 0.0001666944490748458, 'EGTqxCzroAJxFHtJmzkGrZKlzGehizjxRJwGhdNOeiKrPLAhnyrlqRPPBVkPUigJHU': 0.0001666944490748458, 'AeOGenjjqHvnjlKJSWNJopaXLTqAdDyiWkpKQpvEJGveRFxbjkKUyBvlQxacPCIEjPXTVy': 0.0001666944490748458, 'LzVIcRNRwWursUfcULKSVaDexzyNVlRsqtMbVQcbGOizLWJqixrmtBvVGaRGyJxzvwRMRChIbjwdAsUKrOdHZKbowzfCPDQkVS': 0.0001666944490748458, 'FLQdVkAKswuNEpaPROljusLyVmpkFnNyZPdXpWlnlRILtfYnEKPqzwVwdXuunvidvCYUjahUpmYtbclgtziZydfcqIpuOVzSWOLzPqVwZMjvhYNVsmsiK': 0.0001666944490748458, 'gaLqVoLKwLamNaOsUAiNWuHyvkgxTGwLnylbUrUEdHxmqOBCtQTvgbMOTJFhepnqyxmyctzgPCboiRSdNlg': 0.0001666944490748458, 'UwiQWuFICAJMhZQgUcruMkcvfAxoWlkxM': 0.0001666944490748458, 'dPJbRUWQWsjCjGHUdTohQeGRqAQuxwQpxnIcsdPKNZJCLK': 0.0001666944490748458, 'yOSsrBlRtYujJAXGpiDuyVcjNTHsUiIagbXHkgOrdlFCYMmCyojcGqLbYdIciMDWWibIrJuqSDrnQLshGGacxrShRLSyI': 0.0001666944490748458, 'QjXAbOyIjOrFsUoabvopqYswzjGBMvDzZWFCgqetZcsMinOQyFkqagVZjT': 0.0001666944490748458, 'IwTlPGlcRHIWbcDfUlVvSVJFNfGIMqlSFrvRyVNOwGLmEpUvp': 0.0001666944490748458, 'ydvhTKlxnFtQWbwzBuPamMafHZbMEHEWlJoQmzUifZsFnyCNDkthWTVBVtvmXpgQKvMHwbqntWAjU': 0.0001666944490748458, 'GwucVkdtOnvrIpVCnPIGMbZqjqIdhaCZFmsUtlUjpdvfFZXHRNgPySTdNxitGPHagunfsWrwqpfOHN': 0.0001666944490748458, 'HDSxNtUSbYYKoLfTaPcoadLTdhmoqgJMETueZl': 0.0001666944490748458, 'AAylIpNInXHWQyYBNlqnPQVvrJuZVbgrcJSitMssvYw': 0.0001666944490748458, 'GgtLKRrfwwRiEbn': 0.0001666944490748458, 'eLMuvPMURMIeOEIMOhenjxkoaNStHmfWjJvHsHqqWcOtofMBvyiSXbwaSVvPlcLOqwuFaTpqAztXrCTnPQphxfkzzERSHavjPmdzzbH': 0.0001666944490748458, 
'HPxMnuxjbCScMxJBPiJOXlbLHMfCsnxlMYMuXRRnEdSCMZrpXivbGHGEledNESEBQEoBuRDHEAbmfezbESTeRfnKGWEVv': 0.0001666944490748458, 'LTpcchfFmvWThDubEyKYcEvsJcgBQqjdgslPhuZNOHAxxfSnyXfMtnZJeKXdTUNtuWMtADPZLiobGlkwiRkRhv': 0.0001666944490748458, 'FUcLtOmXwIfEmdTfwSPhvfPabVwruVevqKHDJPkItlSecQCzaYnDxvdHWMNLRprxnwGuDEfDRxdSYphsLXTuQzs': 0.0001666944490748458, 'EuxOgpepTXBUsYOkvyajejLDbStKjmtkdAZfZejjxDylsgHRjuUPNJFdzeHPkUorzVENNwCkxvcVtoJxxyBvDiILGBSXEvyHKLlrfnieOzeEiDINW': 0.0001666944490748458, 'auUlDnsLqHoMFpVuHZuHkPUCEnGEoMtdLysHUULaGtHHGfsvUjzFkbcPWkHewJDSJhEYkEEGpMrjREIBHPmtmEkDYLCAzDBGzbbdVBaTmovNotx': 0.0001666944490748458, 'fxqRXvYFUTAIEaOoVgcTolYasDgMcxqizxCXgMPdAmpuxnwEKeUWBWqKXOmhNqfLdgvNrSWSTSYPLqgDOlSkWHRbkJxmOujfLe': 0.0001666944490748458, 'WvJixwMSzUPTXysxVLDpRLPWfaTIZaYTntbqkUtVhDCNuJQMATzyYaNrWg': 0.0001666944490748458, 'cwWsXztmMFyOQsQgtgwjhzyoUxKuCbyqUXuaLRMyPCXsdfTsZDrSZl': 0.0001666944490748458, 'NXCuIxuOjRgOBCQlORhxZAkJZQmeWIUBPhTUrshKqGBMiAQwTMqowemnxfUuxuzTIUBZmGaVVWi': 0.0001666944490748458, 'zqphqtlXmyUcquafpytJNkYsFayVBNdvtfPjdgQrnenskBIJVouYW': 0.0001666944490748458, 'sGutiwH': 0.0001666944490748458, 'KRmKeIzKWymefzFRkbtFjbszHaFBXwRoNaUeClOhfGOVGXgJTIAQcNVTgWgricOgMCbOuyPPjFduaCapXvU': 0.0001666944490748458, 'wWlPwddxsQtvNNqAKghiYmWfJPsfotjGmWJnlRNZB': 0.0001666944490748458, 'DiwkrJAxlIyjIPpCXXwUHrgLYbfIQBoovYfXbMDrvgxMXKLnCOrXrWZpKpOfRiNASXG': 0.0001666944490748458, 'oUqFGmZCRYavihUISMZTbeGSbNWrACDELwNTbFLrDtPBQCYVobbuSeaDyCGdmJUkNfzWOFWcCIBdDZtVseMukohybjDlyNNKMDgiYZYFEfmyvU': 0.0001666944490748458, 'oMFxwgveGTrgedpLkhkyNNkpOqbkWeRdVoerHiqorXsEbSuWuyPWClJVYLYETJexxoyRifppMwafIENtAMKHAS': 0.0001666944490748458, 'DaOzzhOMuFITkCIQObmjPUAgVAviEdGLQOZXesoUScqihimCF': 0.0001666944490748458, 'GXQHKqBoMupFjvTMDDYexTMNlhrCvgqQSNAYkyXJenbLFrFqgfiGvOARzJliqJLampoOJKdRrkSIdvtvuOPoTrO': 0.0001666944490748458, 'WcLGRLUXsmwcRcLaZvzYSTPngRyLQWFftPHCjWzAwqCfjJrThGikTIKvV': 0.0001666944490748458, 'xZaFJJsbhosjjjaWTDGfzjDibSTbhpJnogmtoQJYqlZKgTuHiLbShMvkuHFXoXVcyloCFeDtfuhUYBqQEjqVobTXk': 0.0001666944490748458, 'BpnSjROmAvSbW': 0.0001666944490748458, 'ChpBXgvrkNWmtFrarkzAAMEJXcBodixzbwsBtKjxiuyLCxFfQsJTpTEnBSjWfIxPkZtZnZrZZByvIkKCbvlMdElIeyQRCGmwKgNpEwOkDTjBaNgfqSzMH': 0.0001666944490748458, 'YTRcOlYpGyadTjIiKrHWFaU': 0.0001666944490748458, 'DSNxkJAWVfCfLSaOoPEQxCZgxjSilwZIXNHYEDnkJcp': 0.0001666944490748458, 'XPYabIOxsIdgXDmaSrFVlHlaasRjsIftTEVrWbKgzrGplttQwPJfftgpShvzjfnczDscUmNzAXJsMFRxDYaeqtPpumLzPbWOjU': 0.0001666944490748458, 'BdQPpEAYfVqdFQkWIaJdYGzfiEdqLUXmIJrpFtOtUsULFYPBJlsKHVJbcIFafoaBnO': 0.0001666944490748458, 'kvHMGRoBLWlVcWuZuzhuMBROhnkCqrHmRWzCpvgEIzPibzUUt': 0.0001666944490748458, 'YbMrBEoYKNKEfHFPZxIVcYvaUfTFBrWMUZOZuwdUieMksjRaAPZvvA': 0.0001666944490748458, 'pwJIAldKpshZNqoDnVmWASdVFEKIvOfFqXaTBqAwyvCeDasjHuGTTymiLELzoxbmbjujvVFtBCIPJRtPCnOdNmSqBIjSkfMijQU': 0.0001666944490748458, 'KmronfjAaTpoeEYxhSejQIYBpfbGromwQSpmgyjJpOFENgjvxDIgPQbBdRnoqjaukYBynbxNffpuANEnHlRfR': 0.0001666944490748458, 'bYXRKLswAxpQeyckeIhjebVLPjZCuBxEXqDfaliMYp': 0.0001666944490748458, 'pm': 0.0001666944490748458, 'WwpfJpPskbGNozFTLgWaAPtsuxmhxypnZJMaFZSXUZvZIuomEtyyCsYrToqRMjRXLVdrBPHvsOXckzrpknUiEC': 0.0001666944490748458, 'RjUYWQlwcmM': 0.0001666944490748458, 'ridRtNiAcbtXCvngMhkHMLBtDVMuUlGkftfOuCCvHPFMeNGemXGxlGcqsQaTIPdusstjtWRYRgfmZYkOFqeJnWb': 0.0001666944490748458, 'BRGSHxGgVrHvFhr': 0.0001666944490748458, 'hbAbdzCVcDzJPMadezzTdOdPvCBsGYEaGrYMqQpLHsOPTMgdGAyuyLwhkktpjYiZxhsxbDctGzwTxx': 0.0001666944490748458, 
'xUSFgFriIkmfgnQfWwGgLnkjlBaldKtopMVaMfqAfKlHSaxBXGWWBUGDOCjDdTHLgKpuFADthVreoLnsoOivdiosTrPJBHBjaNLygmtBKdxAfVbxBm': 0.0001666944490748458, 'SblFSBkILFAkaySotUXbrOqqNTCOzWPHMtALUuEIpLiKWhYEYybXlMEodXyycnsVHmPLoHYfEUYqUEjHausvSs': 0.0001666944490748458, 'dWjmRfyZNEIkAGAcHxFdMOcqlvPktGzalYhXPbMovrxJZKMCxInSnCtWIQdtXbuqgZeERXNaSsHwUkaPLTTpGYhYKweJGNnDeyOJPVLwQaLCMbN': 0.0001666944490748458, 'lyeSFpucQOlEikQNYkLYDUrekIsWzXAanZxbHTKXYlnwyCbawkRKXjtvKybShHGlNDWpXIegd': 0.0001666944490748458, 'VXrNXqfumcnpAmNAFLtGgWJLtAeJOYeWIJkqEpNpOaMbaj': 0.0001666944490748458, 'HeDBRBMSkoJYhPcBehFdQVnaXrMQiGG': 0.0001666944490748458, 'uECdAmshUyorMVKyQzrFJFMGVAWxFeTMq': 0.0001666944490748458, 'VTUMkOpVESvgbRXtWZReGGYNVdHbmsHDSZQEEcqyEKyiRjfpaUMoMpyNktBKoxlMACaFsohrCBTMKUtMc': 0.0001666944490748458, 'iKbwADdpujazCoYdVFAMgiyxsXiUuvkwxKWcigDGSwPtyqnVCBCnlbhfXIysynpUrQbdipYlSpQhMEITYKo': 0.0001666944490748458, 'guoLcTyLkhzTpqunbCSOzdXfETZkmMvYxmLzcgCkZaEQKv': 0.0001666944490748458, 'YIrlWpeNIqChfFISrbaIhSDjjVbmlbgABlRcKKiodRmMRWCxuSLnnvYKtQVRHkpvttybbHwqkEJHrEiwZDHEAJfpgdfIlHzAhV': 0.0001666944490748458, 'JaqHVxczPacVyaerOmhrBheuCKJAqXvbxmUhfihpTXmEmFemMOmixUJHguiIKdJeklOMLJUViuUWQwwJHtJfyLh': 0.0001666944490748458, 'fBxnu': 0.0001666944490748458, 'F': 0.0003333888981496916, 'DhUgnpkqjfwrUzuBYXhsIHJosFbFTupwKBLqTfgOURJTPtGtJTJMlpcLPod': 0.0001666944490748458, 'hWgrUOuVtgfFosaoWiIuXNWmwClrIosSCZMBTotTRnEsJLOPADphiULbXzuFUtBpQlDcsaknLBpVw': 0.0001666944490748458, 'qDuBCybEbEqjZPFkmywJLlCNgnFEusgYeCHaThPHPNzyzytBqMQPweKIUbehOdHVMslHE': 0.0001666944490748458, 'MxxtpQcoaPlmRWxhkQvvdlzfRgSNSGNNmMmOElJIcLAwq': 0.0001666944490748458, 'McacbAiKCpbLxpcGNUJvHLGLFshhhbuZyFIfMJBptXoBsVjaadmikBvEbSQHJtbIsvNcBxeCn': 0.0001666944490748458, 'igUPUniYhMgHdgKRKrmTxEMvpZvoCMIElXoqmAgiChrpNDNFwexXLFGdxPH': 0.0001666944490748458, 'MSYiDVKcudqAsMKbtSvuEoJ': 0.0001666944490748458, 'yfajlntqkbkPNvAKaUFQdLackFwpDYEKEzBFIlFDJrhqteERIgHJfpHpPiQFOSdenumeCZwsxnnBnXEMwbEKCTrwJWiVHkpZQbsXptvsWnzpZJx': 0.0001666944490748458, 'XenqUtbjbWaZpDiWDduQCiq': 0.0001666944490748458, 'BNxrRpiZpzYszTqtOXgPWgpMpPepyCEnghsjYezQruzTjabOmTpZjIaFOFdzPGOklOvoPidVhayRHKzZlslUIsziPlGzs': 0.0001666944490748458, 'lLfxYjlTAOKGRxXjKTEJmujKqxyYugELcBUmsFAGPJPjpmwNMmiTedHhueMMdlSvbofjgItfjpiaAsrGDblNnaIGcaVkyAMllOZtVBEfbkIxeY': 0.0001666944490748458, 'INydlTgjieMBWwokYrTsaXVYMnyGjcYdPCSHcVoyCDhwBUgtorLcbwDEhcXtIaPsaGVlJQodjidDXuxzlxCxdZNjcxuwsuLURApfTlUxvfLcYccrX': 0.0001666944490748458, 'nhtkjZwQifuoFeVqJHEeBkwKKVgcEIf': 0.0001666944490748458, 'zpImPXUIIuIzxQvKFvvmoZgbsFSJkFcUjaCLoFZNUuqdyqMLvWjpGIqInjFdY': 0.0001666944490748458, 'qmbqXTLHaDXPHrhkCArnEhLIYYygtkzYZ': 0.0001666944490748458, 'DNKLoohYeaQAupAnthodfdVezvJHBenjXlFmIiVkVakMTNZnkHykZstBbbwMxbFJUBDQV': 0.0001666944490748458, 'CuAIlOWURzFcRdwXnxlQnRdAIffGIYfPefiYKIElFRQmNNQbeQTjCNEviECrHDvMEOYtiCEXxtblyB': 0.0001666944490748458, 'AVoEJFYSdzIfrokLltEIwhEhvDdrKsiAchgxmLKqcvONbOpHNDBmTEXhbeETbLewlgeKxODgPDmwzsx': 0.0001666944490748458, 'dfljnllelrmtnlnHBSrorpRMxUOKxAUfaZyIrinmioNfRrPMIgyvDFDltVRGYrkROFpgHiGQKlJQHeGsWseuhl': 0.0001666944490748458, 'RmJLcTEdKgAevqMmWOo': 0.0001666944490748458, 'yWHbIWnNlgYcpMWRdhVZJSVJx': 0.0001666944490748458, 'OmzJZWdgrI': 0.0001666944490748458, 'CxGsHRwpxWBndZp': 0.0001666944490748458, 'PdQtROKSpBCBThKDLPdNmpivKYFBbbSSbmbIHMacmdlJsjbusVcqUan': 0.0001666944490748458, 'LAxajVxmpAXACt': 0.0001666944490748458, 'gsRvBpUwAxqmIXGASnSuAHfZMwJcFUeVGHJLTAscqtXaFGMLinYingAeZYWffocdVPDrnpfzHGeWZvXEZQvOTXleF': 0.0001666944490748458, 'bnLmbghyT': 0.0001666944490748458, 
'zvYYcOELreeKhTtuIDExNqfbrFRCz': 0.0001666944490748458, 'JkEsdHRkebyflwfdRfeqkSAiPhbmA': 0.0001666944490748458, 'lPZeUPygzIGQqHmsEtWgWMdDbuClhGNgcddTx': 0.0001666944490748458, 'eLzvNDEKZGeXYZRxQOvVKxrYhpgijuQucrBhUeQNSHlbBlLzgjBsPdRSy': 0.0001666944490748458, 'qgGtqOE': 0.0001666944490748458, 'hwOITIxPCiIUgmwIGWUWsKIIMCXoBYSkhDppvVtWrwHttUDLkytlrYMCfVqtJjpHaYizDbKOZPJWXcZ': 0.0001666944490748458, 'fjvcCxQgaznhsPIgyVekgLUlArfyiPrbhbVMpZobVnnxPdHnlOhKfqMClPuztMeLZcVVXkxvaZWNHKmslUgeEJQFB': 0.0001666944490748458, 'eqzGZpLqebyzdpQGBUDTtaGZkNeDIQaWpbjwpkMbNujkdqWkMCoCdaLFVgPdoEGRhtiWenMOqaORNxRHVPSeHNtegHMqpLRIPMMXlYKhcTrbBPahRx': 0.0001666944490748458, 'kTYSOUcpjVSayErqOZFwAYJwGkKMOdhfcCylHnowldLufCGYdEr': 0.0001666944490748458, 'rHyTnZtOUbZZgVKLaQOLnQQEQvOKcePovIURjzRJWwtOvbacyLMzSPBNUuBzJwWAzkAHidKaNKN': 0.0001666944490748458, 'vhNlWmgZbkdhJbTzvtyHqKqNkDDPIEFFKpsitjylHPSQmZGfgarbTostbrNjoGBrPrNjQpqLANSxBqCxZCxJoslvpcXgGyKFXJwBzYXlpudhprYemjO': 0.0001666944490748458, 'bRMRXDdWFfnIvJldNkmWFGdnVCwJKhWWDrQkaSpyAMAYhWynNYZIjQHDuOIBpGcDaQOFtvINpAxIYYZOrPNoSTXVQGyJFZyOzSgEtkfPqe': 0.0001666944490748458, 'QdVUBXTsGpCWTiLtDlFsQmqndWZkAVBhZkNryxQYweQujCwSTYVUBnysRJCFthFOpFraNfNlFMLiZjARTzkjreMYktmob': 0.0001666944490748458, 'VUBUoBkowRlqXCbsjQsirtiowXwWoTKpy': 0.0001666944490748458, 'TWQuWdIsSQvecEczpCXJVDNdLPgqzBUdcoVbWuERbTafRGKLkfFsyKicRnsOOJHRmUrgQPYHACxMXtrIOotYgXnsqGdcMTueLYdhrOGCSRCCxRUbuQzVt': 0.0001666944490748458, 'nbRienGSvDJYbNHMGSLrMnEOrEzFyanGRXQpKvMdHLrlBiwMYaIYapRVgRgisajjewRHYWydMkPmrlPmBFefSfk': 0.0001666944490748458, 'iTFPvEgArZaEBIPKlBFvnRNBtrBFOGrQElILMVJmChhXhIu': 0.0001666944490748458, 'tpTKnzjyWxlzcARCWVEvAYrCAnwkMZOmnzGjrYWtVoxgcBjoekMtBZXQYXFdlyyctWRxQqXenhQhfwxbrfeLqTsyIzsdRvdTSHm': 0.0001666944490748458, 'sCRDcPWtpTHnrKWGjnaIhxAEUQabTUqHjQzvJSGeIKyaQcFoGJAyoAaMgzRMDRIFbL': 0.0001666944490748458, 'sBRZaGpJmFSWqqJVRXRqhGpcuEWiUbjcbCjGGyeIoUHIXNXBZ': 0.0001666944490748458, 'QjPlMOwPkKWzJOUIwPBGJtUNDIanHKIItuZsglvVECCoeDUfMYosrZwuLViXiIyXiInYJYKhrOjLRzTSvJVhU': 0.0001666944490748458, 'xlwIiWVkwwWGJCPdlYjpCgHITqdVPqOIQeJtzWZlHmwmYXMZUqlSQjJZjAyjCtRjWDJRKpxrbShSmZhxnIGKe': 0.0001666944490748458, 'gYlmozypePvVRoyHmFuaisSDQJvUaeMBxKBgrnhOFrXAXSVQzOhycaCUmkHmVDvmUIqlOzROBWeIWFaScVwcfjLXhPOFskfQdReezFtCUdulI': 0.0001666944490748458, 'urtfAxdPIVgNVyYkXcwJKyCRyFlYBvDioXlgXQZzIwaLpUtfXwataHuWMOsAOKaLJIkyUwaHZQO': 0.0001666944490748458, 'aeOIKH': 0.0001666944490748458, 'QSsNeoaxAsVGJZMsmujsLLWAj': 0.0001666944490748458, 'iglRNtHbHOyBPbKwLUmvQBUUqKVuwTkKrrdkOKhxhsLCIxkppKGQgYynqkZRDLbgrCQsYKyiprYKNDfpWVzKgQdRkc': 0.0001666944490748458, 'gVXHmDUwevvnsJWkEojYzEl': 0.0001666944490748458, 'VdJzpsaXfkyIjgrUzDACvbDmvsoiJUNZdAlSDYcyAegehocLdfRfmzztQoRPKDGRNF': 0.0001666944490748458, 'EeRAPrQYLEziDqXVXLWfUjQJMFaOpwbJBhyzQjORXVLvrzSenWUwwqabhkxCBYPscNzJYvSzmOxrgivNILnQBUyyeRfiFVaqbyovLIt': 0.0001666944490748458, 'NslllzHfMrDZJkGwtGpkNuamhFiYtbgOAqgsnryxSoaXepMnJLnoaLAyUAmgWPVHOTPBKLlzBzSGDg': 0.0001666944490748458, 'CidfTiOvVtuMTTspSbTQuNqiwXgsKSzeUfXECFvzNqSghwjESBCsgCeQLcj': 0.0001666944490748458, 'rmBwomOccCZNNCFdIyaiVDmTJTZSXtVDPgGqReJvfyLemoGxhiQfCIbUdkaMWSYOKFQKzXPDlj': 0.0001666944490748458, 'uA': 0.0001666944490748458, 'zZbMzlrxcCSDQwQWNgqZektqTEJqZfKqvVZDXfd': 0.0001666944490748458, 'GTK': 0.0001666944490748458, 'jrxBLXUDIoxgqHG': 0.0001666944490748458, 'jyYhgwsHzqjQhZueYE': 0.0001666944490748458, 'aPlZZQvTosSqxGsVeWQyAzQMpCFFnwrGYSWpWLVlcBqEVxLDbIOXeTYYAhkkVYRMAamaWHnMdBPYANStyfZblbY': 0.0001666944490748458, 'PPxrOT': 0.0001666944490748458, 
'EcYfgyzNZzmqHvnLhKusyNgfHXYSAytezeIcCKEqDNvIIaRELCkLAZbwTvMpYeSNVXULYPQzoGaZAKFocTIOWEFkOdQLhTHtwjpCSNAmmBdxrRDlJLe': 0.0001666944490748458, 'HENYJhngnEZnTvmOiSHnyehRtAnCjvPntHihmjMSFcvFhCnuwEEbqigiJmtpnyhoIimDTUWFdcQJqCgSYROcDLvdJYxPOrfyVtx': 0.0001666944490748458, 'KEJZgxBHgKVQr': 0.0001666944490748458, 'lwOtUZMivmlyLpvpcTxqKqJZKVlTEPAynKaXUNtcPkgWDdvexVzcvPYhJNDqqwQqkDThmzNUiSyXZ': 0.0001666944490748458, 'FTYHKxQQIUQTtOggIuscxIdOih': 0.0001666944490748458, 'DTPfzfoTavUJeWPxbAEFYahryhDVmdDQLxwzSSfthONuquQqfW': 0.0001666944490748458, 'rhGoLvguchZUJOQURwLcbthkQwcScchKxFsMAZfgYmdOEBGxSneiBcSrILTmk': 0.0001666944490748458, 'ilBdgAnOoTebgXDcfmkggbqgVWjSfvXPrRpZAblbXdTiGjZKeSaLjBZIPQbPGPpzrrXrVfCmxBbKEJluI': 0.0001666944490748458, 'TqcnNBRyBAAHafhpQMvDLMWWsOiWGRZXjQCDgzLbzbGMTLPxUvlJLdMbOCimmsLJifROZHhxhghjKKchu': 0.0001666944490748458, 'NbSfzTcDePylkajeYjmExaSJymmjKyvzuR': 0.0001666944490748458, 'kVmRbrgHjLODsUzZXJzDfBsUqJUbEIZwLWhvPcjfskMhaOyzQbeBfpZPFsORWLruXitJJbaMOnsXMgSsDzBBfvISc': 0.0001666944490748458, 'bOhnYKpVgfJMgADZdgAIydXPnekehfNEdQfiIGdVdUBmTIdYqRXOtoSjYmNVwYtxpSbVJmOTNWI': 0.0001666944490748458, 'CgUwWKOcHzFSfSfunQBFTmBhfobkqMQRzgBzRvyrmMqmUFRZJ': 0.0001666944490748458, 'zLaVvQFvTFzRLCJRKrGclNsEBqrzjNLtDBpDXOapOaYhYhRvKCOShxSJNLeEWsgcHqwiYIBwjbMndlMAYdRpsJ': 0.0001666944490748458, 'JrEdBRFeHUQBUWtogrnrcldUjqZwVqJMxamanlxVhpEsuGbvwHYLX': 0.0001666944490748458, 'HxlJWWbVJJFaEeGLolEUVXLtpupsRIKgTb': 0.0001666944490748458, 'FbjJoHjbOAPWERpQdsCCeEkLhhwYcbENJxMkovjvBkLVixlhiPhHvfDpJ': 0.0001666944490748458, 'NfpsnwMtxXhryVWibafCRFFCIFccCjAGRcyRyfedNStZSYWplMyBx': 0.0001666944490748458, 'bTNoRjMLTmbrRrgTXyFFTgCtJJgEvHoEvHziWnghRdmPDKJef': 0.0001666944490748458, 'KttwwIJzfPLcoMuRVZZLMmfqXRkPNzoAwfKhAkKghbSFxCYWwiSshtLWwSnCRjAXehGhrTSKFlPrCgLqxwWpx': 0.0001666944490748458, 'IkLwPmQarHmPbGuLECiEBAdBLq': 0.0001666944490748458, 'rhrgsiFzxSLdOhUloGgChKVvDAWdkOBybJarKxngEsNHRSLtVYHCzgrbFfnksXPbRwuQZGMbcqEHEySISzJXAHFnPp': 0.0001666944490748458, 'wCoqGTcBybElYtssslXEmrTUbwdPUjeFsRtYuetoyEkLBpaQheVGWLsxrpwwMCBCHLhOYtcDpgWCAhLAgTLmkvmhgatnfQWpXV': 0.0001666944490748458, 'LfugdFyAgBHxFlKxSMWPryHQhWErXxIPzMrKIzESVvsZr': 0.0001666944490748458, 'KjxCBOagzbIGcUAvlmgcnsvsE': 0.0001666944490748458, 'mZPaVnGbmezhhVCWBZOLnH': 0.0001666944490748458, 'uNvSZvxxCpCUxOj': 0.0001666944490748458, 'hQfTVUKyjQtPwRAdPdzsJmpQRwbiKOCXSajezPqzfvEZWeDciuthiFnTtctaoVKskNXix': 0.0001666944490748458, 'WQiRQmDDFexaSbBkFSDyVXNxqCGuucLfbpjqlmasyizpeCQwXaLfEAGvyTcLljMpHHVLez': 0.0001666944490748458, 'cxERANsYrvFGBhcZgglTLzJZTFxHJRBkjnvlK': 0.0001666944490748458, 'lttDjVAvivyKByDul': 0.0001666944490748458, 'JlLDnkOgVvVrlmExEfQPWmpCrJiBmgbPECfhQleosutfHMRExNyXkvcRCiqNlPnkCxWNXaDcnDIuyVbBMvgLwdhft': 0.0001666944490748458, 'ECkGiGhrkVTwHXhUsOCeUgMuknVMVhuvv': 0.0001666944490748458, 'GsgqTdAPJsztHIRqDIMSsJNyHaVHuJqjuBHkERXYOmEejHDwplVMNixZdKwzFfoPNynpqnCZhqOJdgdXSOBfRw': 0.0001666944490748458, 'peUiLR': 0.0001666944490748458, 'kGFIEapMrfOePhHLYdvAyDsEXKERXWXLYQEYxiamTbTrlAqPPNqzqGnboveqg': 0.0001666944490748458, 'oqQAYGgneueBCHAtfckwFzsSCRRdockTymYEaIvit': 0.0001666944490748458, 'AyMzTxbpmCGcIzojOPhkXIMZlHqoungSJuIQZNCuCuDElmychRFvqeWlxsRIAjJMa': 0.0001666944490748458, 'gMnoqTxZyVbGBUVtXDiRQHiNXaUZhFRaPIzyEgXIPxlBTnAHBuLkUTrVHCkrB': 0.0001666944490748458, 'DIsrPnAIRoSLPswpjEVfOpUDmyrrkfSbaEteQfKTLvTCjrLHwSRInHqTfbivFfXoDpaaXeEgDFjsAAvWKeRKXiwjcHauWQmccMWce': 0.0001666944490748458, 'rADuXzEMbknOtperTFCEtgGzBLfzRXQfNDdyUrDYOkjoyIFWyWMexANaWansWlhnZTyHFVFBoGPFgAIIcXeobbUkRdYGdArKeoXGG': 0.0001666944490748458, 'HAXBYpVlIZ': 
… [elided: several thousand further dictionary entries mapping random alphanumeric token strings to a uniform probability of 0.0001666944490748458 (≈ 1.67e-4), with a few single-character keys at small integer multiples of that value; no other structure is recoverable] …
'lHWHYnmbEGTXhossGuaMGzcCkFOYxCONdLMIxRbXGJqhlWCxfFOpcoMhCvMEcTxuk': 0.0001666944490748458, 'SQhRdoFYkbZnnOEWJLwdSxpgrNScMJPItXtplNYNEARhDbJcccyLCeCpXrAkeoDmcRvCVTJuohIpBvLakJQeRucNQNGcNlVGwTJOw': 0.0001666944490748458, 'rBFIsrMSBcsBBfMUlzyxAYUuJfPFfjsvgDMmrNRSpnaZNPVHmLyMjiElPrNUoZqftuCYZECiwQcHRLEASZILYYFnnyMKlTuxegmyXtoKmEJkpGRXqjLXwEB': 0.0001666944490748458, 'UpFArTSFqgvXESVSbgJEAYIHhVuihNphxXzXGRrfVxjrUgl': 0.0001666944490748458, 'WpyHuBnvMlYKeggSdfSRwKtMeUhDJQjqPLeOpciXykBNskCUtmyRfNHmGAFQbn': 0.0001666944490748458, 'quWZOLdCXwIWZLgulOLBCVLTDQkBjghBOwRcllIuLiiWEVWevIIahtyRWysGVHtxIdNiCxgzAyzmrTFZWdWvqZURoSctUOSOayvpGHszIWTEm': 0.0001666944490748458, 'aoxMICTAOfvxhglazVSyufMHsuSezjoBnfhueSunUBLVvIpJBEmuIwC': 0.0001666944490748458, 'MafaLgdCnpQSuODmpHhURccLOUWdvBSDVRdemwfooxiATZwwFJQFiwMastteJOTePVYFKvw': 0.0001666944490748458, 'YQjazWFKhHQvVIEceAjnsaY': 0.0001666944490748458, 'qlYGg': 0.0001666944490748458, 'oOuWVfkHuUlZpheEJQGhmzKDpvVwWn': 0.0001666944490748458, 'btbHPoCpuLSkjYOsUZtDQIsPHHSLPYXznfeknfsdv': 0.0001666944490748458, 'jrrByDMZtWdEQGgWalCkfcxXWjyTRuWgiYxzuJtSeRKLZylhtVMYvYOZhlHxb': 0.0001666944490748458, 'dLYfjOsDSTYoFRhrHlZIuoHRZGPRzbZXOXxnFISaYlGgjaMUXwqHR': 0.0001666944490748458, 'OjdAFmLZcKxHYrSBRcmsJUXAcocdcuCAsAwEUJbKkaTJFhqszhmZyjObRnpeqYauRKHPSojaCxkGdkZWiWBJMLFCuXV': 0.0001666944490748458, 'eaMkGqEnAaOHcItAMBkfBtMOtihLemzSDGLfZQTulURsISSqZVHskxiYzoS': 0.0001666944490748458, 'kNqNBLhEmedIvB': 0.0001666944490748458, 'bwYRCumfhw': 0.0001666944490748458, 'OMiWdjqAbqKyjqQXMEncZT': 0.0001666944490748458, 'IsrBcjVGjtItnqcMQAnTUlyiETwqXKORvFunkczKocRWuSaVwjxNcnkPVTl': 0.0001666944490748458, 'bqbrEZAsfrPkYaCOvRaGJnqSrPoxbXPlUfXRQFwjElaEqwsHzbdUWwDzwvQiVhziwXRXXlHNKyDaGkT': 0.0001666944490748458, 'kclUoVlztwqZJBJGOloyqdzvEOnuejAlkMGFSOOijieTqZFxPOpLPVYGJFbrVXJozQd': 0.0001666944490748458, 'uKRqiGNWB': 0.0001666944490748458, 'FnraKirLUsYjRihxlrEjoXvAIYOOEWpNiUBqenhEedzsGGIbZBnJsbKnDsxHIvnLFzZdBBtZDuKJxenBmGyazkXHMLSdzcirgZMQOcXvz': 0.0001666944490748458, 'zqqpCKUFSShNNDxQdFenAOSfYlKUKpxwexJvTyeocA': 0.0001666944490748458, 'HKxgDni': 0.0001666944490748458, 'YTNKhcHaRhaEiBKFpszoJOo': 0.0001666944490748458, 'nuyfKDXTRJeNDtPGKKdcBBORnmrRTvAJjwfQtSxpMfdqpugusooMRvUFfVkLCBMqZomgYKENQSghImaxzABbFkOeyVhTPGdqsMYzHSLoEdwLzWPkoJXfdM': 0.0001666944490748458, 'QkbAqgGNBwlzEwWsQQIXTkiTfEALpEFLjPItplVuCHCCXuHvOihczsx': 0.0001666944490748458, 'KzqTXJDKeevbnSxlXyGHEsXEIaHvOugiuEYRiMKbhHr': 0.0001666944490748458, 'uQxHUOHGfuXuTrYoW': 0.0001666944490748458, 'wbRpfnjtHeBRMrsvBXoDoWiRXQzMgLfPYwnwvMKLFJHRoNjNkNEMYkOhlxnmToSaKMgyKJqfCvcTNjfMcFHQIvlPJmFbmrCCl': 0.0001666944490748458, 'IzlKPrIlwMPVREzssVDaAUjuMcBWvQctOpwzntPUKkKvrpsKqMXjLOKISsRAzSgVNVieoDeVEswAALaMVzytSjnFHKthoPOtNaPYk': 0.0001666944490748458, 'oVjJpjsiIEatRlZgCGdUawT': 0.0001666944490748458, 'qMdnJntzmTMlgoILsoGRxKjPexlWtMkncraSlKYbmgb': 0.0001666944490748458, 'YTDZDfsemVlNLRxYwWcPjijGwOxBtqInQxGmgIrBQmCjvmmeYXFlHXqLFudtYUWAhL': 0.0001666944490748458, 'pFAfPTFRwCHWWwGbuASSadeJsAyaKoTrVqHtxeYlRMdPRJPENJdKrFPCIaiGCARPfmQpRCvQBlzRIFpEcNnVXTWUkznEfNvmPccxjHDlhM': 0.0001666944490748458, 'ZjSSCAchAtXPboKBpVYLFMFuaneSrQgeIjWszIxmcRqDMAaGIssSSUaIwMCLnuyTWBseLwnboBeENdEcreWdvOi': 0.0001666944490748458, 'ycEIzkuNmfvqXHhlcskrngfnecyBOgvDLkVvktkudJB': 0.0001666944490748458, 'nyPydezgHCPuqytWeNOBFwVViyisWimrGvjmnNSYDWqJsxUfQqMMTsSkzzGKXIfTSkQjNocKDnFzfVORMqwNpQhXhNpzevDVpzzJFZwwzS': 0.0001666944490748458, 'AjewrqMtfpnROYBbCmZfegIORvgnXhKTsdzFPdicKQTZFcBOQLJxqTxCNRObVRzJBPghtRSeOZqkgXSNCnTGLlmyxYnHABTDlIRITNaJqz': 0.0001666944490748458, 
'HfAnYFjLzZMwYarRqpVMZXemRMBXNmVHqmktYJmSdQHipcQXtkbqgEdSAvRqloJfJPXwuylboiMCVGcASATesMNwEKCojZjfX': 0.0001666944490748458, 'fluKBjZRihsTbrMZXHwjIxrMZpGgculisLzFMOTuVktHNDbyFDZiHLRwoJNujUN': 0.0001666944490748458, 'PgLcLxeSynyuXMHsyDfhRDLfWuqoOcBsiZmxzQoXUlHxQVVaNbSJQlhtVqtFPS': 0.0001666944490748458, 'DpIxNuhIVdqOJyHbloOIENZNqyfiAvtqdsDzHXmOaEbdtAwKCbkTKLuFYHavbPftaiIrCSYJBRKdwUeJFPkOtxKvZGvQCTFoyxzjmOUSdhzMXNFgyb': 0.0001666944490748458, 'XgKoXSDgBUhhAeEtSeqYRLAGjtBmkWAhbZIiQVbEWTvmlaomghnFFAmOgyGzdRINVzaWHydkme': 0.0001666944490748458, 'aDvYHVKhTUKApsZldjTklkBgNcPfnmgdsqLbnHXzfnVSevszeiRCBXyyZXJuwzolXAoucvMjeRYmuKuEjLB': 0.0001666944490748458, 'KYkmqVZviEcGXsudWgFUDjOAxHGgfVGeENcsNnafyjEeDOGampuaupaormjaLbQ': 0.0001666944490748458, 'AzXkimPMSjQDTUBVohvoWDYZGxLQDTDtsCxAAehxiVprrGVeWdBMfBgZFPBENdmQcWVtRbpUfXdcTpLJnJ': 0.0001666944490748458, 'JERxnMDkvxcksZczmpzUqOoHclvThBenrSHjNxcLLVUmXgzfo': 0.0001666944490748458, 'sCwuDGhjQVOkbxayqrsdUtyMXDvDUtMKWkvjuGKwhznPyHtrQxsEbcLPJGgXRJAKXzDGFNOuDcYAuoNlbnVpeNrIYrnELVjQKrwaUjo': 0.0001666944490748458, 'wxBYglYNvcUtIULGviMBlmamjTXDikNypyDivMlgQjClgCzQBUmTwHPoYLcGyAmdRejvBHNoZuoxeTevKPePtVHwgQb': 0.0001666944490748458, 'OJjmfDZiTxKcsfIAqSBaLrK': 0.0001666944490748458, 'MhMvXFpHSSbcCIoEwOHexXadC': 0.0001666944490748458, 'WMpcVrcbuOKAibIkQiNMpJnEjuLuis': 0.0001666944490748458, 'iLvChkXZnVYmJWsnVxhOKktpsiqdsTUegQ': 0.0001666944490748458, 'kSvwYceuaRJnaWhAVxrZWuHrwHuUNOrKhQxVqRvKjqdImQmPYNpvsmyidIRzgXLTgHUwuGeYPGIpjpbuLLGdvWeKcHioFWaTYlZaNvxcQGGhDGO': 0.0001666944490748458, 'wUACSMhNkiOQXmMZeMYcAOkwaYIsZCCVCezdjRuUlpQOU': 0.0001666944490748458, 'VuwdjdCQmkReqtQKXpalkjnZjosmHWTqZGGQQuYbaVHMdbzmihozmHYrdRyfSEDBQMuSGydNsUbQexiJvnszETqoqGB': 0.0001666944490748458, 'FNFDbEaLGPMlcOXUMQpitwEUCBdUjRdqxDyAJSISuiIGrnoQeCdcEQcCYhWhqSvRtpJvpQLnhl': 0.0001666944490748458, 'uIUsZjWRyFWKyLDcsIcMKMEMWHooWMnwMBHSquRUshpGLuyNqAcsgMOkMq': 0.0001666944490748458, 'tlZozMbiFUpeVjBxCFVKxUSJfXTwGnIhGLjPyqGlrNLSVxZwGozkjJaxfCpNJiebNtYHuvDJAlHKstwdVpwPobSbBFfkXDH': 0.0001666944490748458, 'WMtTPmXUUJsKFyqJawNdOXpSkzuSfLNvFKukVMDKdCqQDPQLbXtnGluOBZiwHYYICzPXbAXRXDaNoEZMbKMNKCmCTAaXbxCMqtb': 0.0001666944490748458, 'QrmfIbOgcBbGkysooHEsOcd': 0.0001666944490748458, 'BcqwhsPmnYsBkzFVbgNkOSCdVQFaGddFjRrjFyQlQTYYnFpILgvXVCKIdFkiSRnyahjUnUHftbIEyECeZYYWOi': 0.0001666944490748458, 'sDrHce': 0.0001666944490748458, 'NgziHczFZDAGtCOGwhiIR': 0.0001666944490748458, 'HBzFOofZDmyBckFrJJwBzvrbfnzpbBnXfVOxLOJCKfECGhSCMRvhk': 0.0001666944490748458, 'nLmbKtxxARkKYdbZRLyYJbSlINrRNXmaNrIgnlGIWObeJefzMsZAUEqmpaCNCYWgwyLiCkSsxwrIiLBywqQgHjJTJOv': 0.0001666944490748458, 'qKAfyiDaPOTlFzZKFrLEPORidlNpDbATPiibcDNcgxsGGqgVaxnEfdCTiXtTBv': 0.0001666944490748458, 'yGkBHoTtbpRIyQhcvEaADUsOdZhZGznZXUtbyvDWNMJfOzLmMCLHLyKKsk': 0.0001666944490748458, 'oLUZdvGTqOmOLzmbL': 0.0001666944490748458, 'sJKYnKSwxnAfXOXyywKLoaTlnjSaomaDaYkXaKSojIryOhQlpCQedFoPtMoskPqeEueZrWleCMPgqugCmfKVZNayGjhbkD': 0.0001666944490748458, 'zeSSPKuAJGfMduNtNFuuIYuyeSFQIIzHbMfSNBnAJllAGTKVqXGJhHSHwxjSVFXdpcnHEDMRcaUdaeHyxA': 0.0001666944490748458, 'KeOuLspPCkggjgSmPQYtkuQilVTBlCPSwfxmwoDQADGlqDPsmDHkCYxRbTzmFMyjWcsjsxMKmrNNhbFAGecPcuqcsxrZQtSOpWELkpnEyLcYawQVf': 0.0001666944490748458, 'OCAeBUZCTOQBCmUpDfGijbHDDUuipBMqWoKgMFEYEpzxwFeZYhspzimFYIDKsxsWVpEdIZW': 0.0001666944490748458, 'UUGkmqagVFHHdnovveDTDLnEKNmRLXXECoPPLtVcBjylGAMhiMNLaJKZevBtsISNNRkjhTJRNERVEXcwHsWtBLJoUOIpbkqQHZ': 0.0001666944490748458, 'BvPaWoEESqKRDWJVmkWvNQckmJoxJXZsKIqNclwJgVrUUxsWSXxvtS': 0.0001666944490748458, 'hacjPjsQZmSlJyBrUsjqZYMAANmSzoLiRf': 0.0001666944490748458, 
'MwFripByyccHlScHEcaGUCnDJKsIHKNXDsBoXGmTwkKTtYdsuclwDOEwRLgJxeppJJrrsCAYxfKYtgDHcruHBaumifr': 0.0001666944490748458, 'ySURYTNmLreNUSqfZzAMbNHkHeGbMgCwDlVuLtsiRNUjzVTOExhhkgIzsyeYqkRDzNA': 0.0001666944490748458, 'RYHyGcKfHIMziRcrljQwgpZGNOTKjsnmoMusLEyGeFPQVwDRJLGpFwQHLsfAGm': 0.0001666944490748458, 'wpkrUpb': 0.0001666944490748458, 'LRjfomhnLqAxPntSwL': 0.0001666944490748458, 'HPVrMpZKoX': 0.0001666944490748458, 'TPKbCAyRtwblFAwDpfuZJpRcnmTadEZamyyZtCPcfETgZfQQNbJzdgbXuFVhyHXzUnQwzBOZFQPRMWFPU': 0.0001666944490748458, 'OSALPpIXnshuURIsxvALSSTJWDANDbgQYforNAZVWLGoJZLbQjaBko': 0.0001666944490748458, 'mKuRkzpdPXdvIYXChmglkpmfEKBPSIOrimQvdIQvzYVhrOsYbpltHsfvrusZiUsWlaLpLYz': 0.0001666944490748458, 'AhFdIouHWdjPBXmZZTLFHggzzinnBnblEDzjDOAzTVMafYJJSYQLQrAUMYaTGcjAQeXBKxQflmogieFnqBSKhllovckwYObIEe': 0.0001666944490748458, 'zRCdxYQekhqpDlCDnCEyDLYiwExEipidDARjzrQSkaeLwobJfAjhbOPcYgiLgVCDjgIRPVijMAhPzKdtDBoGEGhQOIDBiDW': 0.0001666944490748458, 'UuHjOlbSywXIdCfwFNOrGnaEDFgtfrNRqaZiSyb': 0.0001666944490748458, 'zyIRNHgLJstpZbCLMETtUPndYSsOGbiiHhQXHQvOxHKITlLPVcPYTNuBjvtUkwoRtYywCjtARsplKakidwXSZwnDERoLRfLtpFOXSrFPXaxkuvySq': 0.0001666944490748458, 'bEWZgzUXzdIGtORnoALgGHKwgKoOGpCHOJWTpiJPIlTPXOnLfGOQNTUDwcueQgJcXpdsCobTrpXRqiEMVAbZydyJmymcysDlsSRKggWzvhjDQwaqLAwslc': 0.0001666944490748458, 'EkfVmmwzuDcrarAGFOcJYVDJJL': 0.0001666944490748458, 'bWfltQrMpkWtbUYCQSzRoRgDAPUbcOxqIqvBxIngQNZun': 0.0001666944490748458, 'U': 0.0001666944490748458, 'GvnFEMRAVxkTTbsbgmWRNvMgzNLGAwiLEQuHbVscMTYtVtLXpWVgKIstsJkQFDThEezFqKukfqRhjHrXEtdptcBvfw': 0.0001666944490748458, 'eQjpaxBhuVYDhZHVDRMCcLqJZbikNnUkqmwbcMTHenxIzGSlcOZgBLRAuTRiuzzIicrmLgZAcBOBVWtztdibUkPkXVXHjKkLspzbu': 0.0001666944490748458, 'wVckiOJOAxLgPZOZGJCPvyRjsIPJlJQlqztZhzRkRUEUGZdZdazspovyCuBnjksXnWwIEBuIcMvZglUhzvpkDloFJwKlGUpMfZqLCknCqkJoyOwrFn': 0.0001666944490748458, 'ZehCdEAYkmuHhpJHmwIMMdrxaeKwFoUEOzjiMjYutEgufwvRtXqdvylzHOiuMDaVlasAEwawmkCnwSZpnxyuU': 0.0001666944490748458, 'gSagCVfILfhvElSoKMgDIsradIbVBhJZWGQfeNetWsfijecHlJKkJAvfQQpPyVscuqxImlzgxQWlkuRsBLvWoltMpgcUtNMCGRkRwAHirB': 0.0001666944490748458, 'wspfFLojAbaPgmYhaZvlkDfGxZCeamAAPWWwIlYVHxwxrRjnQLTClfDBYHDhlJUgsivcpAonmOjSseX': 0.0001666944490748458, 'DdQOVWtaGAnlpAVkSfmoCrXJnFeFXvmXxvjeA': 0.0001666944490748458, 'EAVPdJQklVYMAHCCmodVJxguPbeimxVlbWoBwkBtILbNBcStVJMeKIzDfVQODhKelFEfYTN': 0.0001666944490748458, 'JzAtLyMIXpwOKTQSDRMYZXHYsZFdgssvFnVudWkfiYKxHbYeGvHQnGFhItBRTFYCapOZxnWGxDX': 0.0001666944490748458, 'wyEMSqyZTzvXTkJwaIrZQrazkunRJiUhmOcbDymtQLRtAUvhRJnEhwmHveVBSyjuNMTjEziEd': 0.0001666944490748458, 'JMUSRHneTGtaVSvxsFPTKbkxzfePRtzNmUDDqkPZPXmyhwNUeivFDtvJRjMkOzenoIJMtYTCfyl': 0.0001666944490748458, 'MwlTTZKxaNmLfZNNBnOgaAbOLuBlIEgcQgppvXLJuAUunPqrEPSkiHOerRAOxdpWGZgIjXOnIzpMHWJcCjNGLGUGKD': 0.0001666944490748458, 'zZcUiepyqBnRpLRwKPlwLTqAhOGdDXESmHXGuvxaXvdHAWapJWNepejEUnzJdNNsnWFMbuX': 0.0001666944490748458, 'WlvAuSAelDzsXbTzDPBWbHHRKpMScMMVsuFwYRqnwdUQjDsXiEqkQKAmFAkBMInILSsxEWhPpSDeTEHBMXTUzzHATGQydTqIAs': 0.0001666944490748458, 'NyXCSFyARTFDKiRMDccwQhEDSRjokbtGYUZtsohdFrDsskHluIwBKPDMXSJdfXNizBvbarfePJvbAytvKHIdPEvlN': 0.0001666944490748458, 'pxIkkGTUqOtVNWGQQVrULrZRFTwclDfJDkdolykScCHqDknqilptNpiWGRVeuawhXIXqjwwFqV': 0.0001666944490748458, 'iOKruInSaheqxRbrqbixBBvwZtwcLmdowiUKxLkeKuDVYyA': 0.0001666944490748458, 'NArbFXpRBTYnYyQfuKCBIWgJnseFAgoSguRyHkWeCFHpKevwXiFyvOm': 0.0001666944490748458, 'YvMZkKMOMwHFVKNPcOuxxuApvdHTLdcFzYMpXXmEYoQncjcfrsXDmOvjkupCgsnSwqWCJQYAZM': 0.0001666944490748458, 
'cCZKVMawdGnKPGPrOrFWjbSBoPsRiPaIdyytwjWbaBrZcOhHVrgriOiNcvMbEInOlaewnLbRGKcPJPqQeEpGdolbWlxfEV': 0.0001666944490748458, 'YDoztiMaWIkvSAALiBBevGIMMIvSCRGxnyLdtDujcyiEjaeQViLGmaPSecLNimoaILKISSlbnZQPHLDZYHCmSEsViQgqFsqeiOCsTuUlEDeLAILmraH': 0.0001666944490748458, 'OEoiCAJarbFcNplZnHqJccnbFAXzyWtgPstqHCiNiXhZfdGVzbTWvqaceNXuaJT': 0.0001666944490748458, 'UzVVfmxlKWfvMAaddBgdaxNtwXKXfeaAPBdVBpqppGMdoIznZubpIlhSJUSeiWpxFhkIlWSyZByPtiGPRSCVDhLqm': 0.0001666944490748458, 'NQoBAhnyImQFdISEsFZbiaUXbhNellzhLTeLyHylWNrdqRmZXmg': 0.0001666944490748458, 'sjyVUY': 0.0001666944490748458, 'umOVqwbyHEtjagPwLramQGmXMZeTVCbKoPMqSdkXqocKGUVOOYwSHxjZZoJWLyxziNwhMFcDouEUnkMWjIVcJSCOUYxmYNaRoSgbvxeaQqInSDpsxgsMG': 0.0001666944490748458, 'Pu': 0.0001666944490748458, 'hRGCtAHMfwvKOsWTQAeeeTIlMvBjaQVXzECgBXWhQcCraQNIMPGhCmnDYoGpvasteLAkScxeRysAAoZ': 0.0001666944490748458, 'yDmbrihqeIEvFuhypHLxUd': 0.0001666944490748458, 'XRNKdwoCpzZ': 0.0001666944490748458, 'XzCukgzhDzyJxIiIDvuoJjLRwuxgJQnlOwEtKzxwVgengzkaDkrptvudHq': 0.0001666944490748458, 'IJRkSDwuMimNYLmTQiRciQwllymiIwQTMkhdUeDOCpyFnnfeFJtxvxlbjJwEdKGtgfLoJicZTXtVmMztmdVbfLnsBxR': 0.0001666944490748458, 'NGknHypYgZCYvwtAz': 0.0001666944490748458, 'gAVDnhvZSiTFhliVQZYEdXNMYhiUvTCWfrxzioZnLnQzgvSgamjwWlUhFrJCcNRCGPzVhMpGSCYlJFsZFLOtePLkUHyUZUp': 0.0001666944490748458, 'XZiNjtRsnjfVjmGDKYEnmCxgzsyavPeGwmaZXbbmZKkJiTfyjQHKlXgXCILExmqnYYSQKlkVKfqSGs': 0.0001666944490748458, 'dtjaRGrXtrmkNtkqAUNhollMZgXuuMGbhQFGAz': 0.0001666944490748458, 'PKfHybdOnnteNjXgEKSZoNNtQKVuWZeJZwhdMBhRfYjMCjNcCDDFNzqrwhlWryotCnAruxBsZy': 0.0001666944490748458, 'kKwwGvQAlbcswSi': 0.0001666944490748458, 'EqIIZoNfwjGwjXpjYC': 0.0001666944490748458, 'XIISvmIufYjPDjckYLgdqTVwGrMBPJKzSLVCJchFKydgnUgMLjadfJxvvlhtuEkXQbFvRRNkhzEHGwjRJElWSQgWCKGpTISnCtmqZcHWpJu': 0.0001666944490748458, 'rznsnUpmQViJsNmYYaAgIciTAzBpKZnZSvsNdyhroTKLsWsidfFdvTedfhGjdKTEIRxFyXHJhyNJxF': 0.0001666944490748458, 'ucJfgaoVHdtngopAyeCaJqRKPAdZMbbCIqdsABDYuItAntYHaQBSBhdZVaTGrYWEmvwGouMQocrAVFlGOdJPjEYotwZTWMgdJykUGkOBVjogxrnzIyf': 0.0001666944490748458, 'yRyaDZTArswlIoypWuZnecrBgMuIPKHkpGNbfPTxnHsPFbcdJTP': 0.0001666944490748458, 'UjpXSHEwSVNWoNhRbhFUKFVIWwHVjCSweSfKerlJjrLDkAhSD': 0.0001666944490748458, 'YbYoVqPnnajUqMN': 0.0001666944490748458, 'MLWIZAscpBZOVBeNhAzgBGKXIjrIEmnXfwdsxXblXPcudUZXDDnvoOhZsbVDFveUkqtmVlyhbfERNXxyRa': 0.0001666944490748458, 'xzWFPJZiQdTBJATIvLfAhRJKQBWNYcryHFHrYaIsmRpqgPqKfhMmiNjqJWgjiRKFTgQNTCdVtOhFeVNlmRe': 0.0001666944490748458, 'XUTLRECPxcrYAMLFbhZ': 0.0001666944490748458, 'aegcNYWnqhisPnuQJHRrvSYjguPJCVzqiMhIkidvzVMEaMkPjtIUWpZjQBKVpbTBwRSSIDhsOTISxcTdUOzUxGKoCQbOeU': 0.0001666944490748458, 'dyPLSQbujDkSmxuqzyeRMXgCTYLjHWGagaEMHOBXnpimxaWpWZJYyzpohpwKEKGEklRzOipabxPehDHxrqFlQ': 0.0001666944490748458, 'NowoTruuAaBIUPrgZsIRAHOCvVPhbUcNPjJaajMDZBvMadlHsTGGIAyWuqMvhLN': 0.0001666944490748458, 'jiRGXzzsYKHdVXQPuZexOGjJNCDXBMASHTfUgOGikVPwFdvCQTMPp': 0.0001666944490748458, 'LQgUOuryYkuBzdCzmyqpGVvfwqjymJwHropKxiRqCAeUPtCCKgLpBnupOVvwuyuWmFIivAAAQxKheIkVVrt': 0.0001666944490748458, 'wsGxZkWogbMAwVHuDngmAIvqpuMrrxSfCnsTvTUrVFbhxwIYNqogjoIPdHtsQdkVnwxkR': 0.0001666944490748458, 'cENPYoCRpLCxcA': 0.0001666944490748458, 'nJ': 0.0001666944490748458, 'YAEdtBMrGiwVHPEFJfxSIgGOgGOIQKGQXJxtQLNQetCgeliXdDXDxHwBCgBMQg': 0.0001666944490748458, 'qUMFxxVeFtXLfaKzVnBsQQWkliKfTPr': 0.0001666944490748458, 'HTdIyQpeTjbcuHZkKPDEKYgSDNHyqXarBbqNgoD': 0.0001666944490748458, 'PGYluQfNkGzQvBPCms': 0.0001666944490748458, 'fsZncfovSUkzXPfvNtFTwmXwZJUmANTrasouvqrflJZRwbdeOxrgYoATaSppyFeFcadRDevCjKrNeiRplQAlXLIrSiEuFnBVK': 
0.0001666944490748458, 'YMUSzEpBXSZwjzPCHNLJeUnKkbypUyFWKSYoBWWAFbNKrGCWnoirqivAqhbOIoihxjOiMalPukpTqIUfUHEzEctxMxgSHvYLyeVqiaxvfJUZD': 0.0001666944490748458, 'jPsEnJBWZLFzrvlcfL': 0.0001666944490748458, 'sBrdjruOyDzGLLvEdqXvWGFBAYPjymyKKEPttTvGKcDDBJWpjXmfcxTymjELU': 0.0001666944490748458, 'RLzJtkvaoGekEUjmCmGlFcHMUORLnCvXQEcfkMPfkPKSuOyVzUhTklPImJOBYfMerNdqUeUYrWGLZxOJgF': 0.0001666944490748458, 'duBQlWCPmcAxCDtKGbhrXNgfkMMsfDlAiRIMxEGCpRddYIcCwPRYXsICPlXytYFwjuQyzIqakfCbkGbGpXFsGufanYMnIupESMSaXtFzBNBsbmV': 0.0001666944490748458, 'NlLMJKPjCnwMSGVcDqicRhzsZKHtGOAKIrLtsJXfLqDqCEdJVOXjgjRdjmoDPvvrryaaWRDbxPlATsDnEzAsPLkAepe': 0.0001666944490748458, 'RfSJlzJSKRNSjwEArHaFbbHEYXQBgRdnGBOxzRwQPnoBqDRtPaBQdDwrceQUVtIbqLCkGmAerggWvMITeKMHHxz': 0.0001666944490748458, 'ifZotLvnMRrzsbvuywrVILerEKSfG': 0.0001666944490748458, 'HGTGicDgEzZVqaxLh': 0.0001666944490748458, 'dNCAMnQyqcjMAPsydlzaOJwlfPfbBbJfqvhktLQNbeWMAGiNqrXcxitRGYecRARZmPPEtgIXjAdWBgMFIzHXbHwEKYrlTcOoTKOwIyaVmbwwpHWtO': 0.0001666944490748458, 'LrjqvneiYCXaxockwHUSRyxvWi': 0.0001666944490748458, 'zTvfzrDoMzObrFaSqJgocmQwnWscJHALwhntXwhKjskYbGoddNUGdagablEYK': 0.0001666944490748458, 'apiPJOdKqJTIYGApKaBLFicsRCEUD': 0.0001666944490748458, 'dpiIaFjxSsGFDzhmYGuuIBUBTBo': 0.0001666944490748458, 'ZlrVxRAwZLhxpPcXeOIUvWtJsyicsxGjkSlahyRdtaLJBOUlGoZGdGfjzf': 0.0001666944490748458, 'ilKqDVooXAKMtLFqDlGlnMtuDIfuNMMdLITIdJDwbPBmCeLTdVrIxyuvEVTcyAZsyEKEUAAexOkLXUMxjvEKccQNdmluuS': 0.0001666944490748458, 'yRSQbmirVpkkhPGumuyvnKKnbtpzPcEOBFnOcIWQbPFPTvwKYjNDJFEYcXsJifsaSu': 0.0001666944490748458, 'johHozmZoqQiQmhdRTaXQrlaxNIslXilSWKNnpLhZlWJHRiPmwgUfejoQXJcdBjCInSmpmsDXEduOeHZNMUdSRrHBogQYZJQZuAjHbTvRuBvlGKLLkTFrIH': 0.0001666944490748458, 'rdWsQZkcHvWKSAOIzQRhFFMLfMvrUVDlvkHGqYSFViPdlYIpoOIndtrkOZqoxylsbdDwCBMmBMKQwFN': 0.0001666944490748458, 'pctngUYPdcABTxarRBToAvnVIWODujGaADOCetCazIyYBuWNFvDDMAQetfaRCzDMfNVofERqPVd': 0.0001666944490748458, 'nrTiZcvktsIUQDVSHqcsDtKClOZfMruWYNYNFFunJ': 0.0001666944490748458, 'qRwYuSfRwOPyetcxtkbLryizkZRyTtWXkdXwRTZVoKcPeG': 0.0001666944490748458, 'dmppWNKVyVWRHMrqvBBXNmvvjSpAzIvmOmcCsFeCFkeQGzVhplvosmOOyvNHiKqUKLPNAeUBpdgXkXp': 0.0001666944490748458, 'kwgxoQvGolxRfoX': 0.0001666944490748458, 'snXJgOEPuHzQRnUJAofXMjTLvzXJwDjALWZZpADNsIfHUNxwdDgUfvNHsfCUFZnsopvblOWIy': 0.0001666944490748458, 'ldfbiNNktZQatjEHCNDpRlN': 0.0001666944490748458, 'UNrquuhyqMKSEzKXahomQuKoqOvFCqtmtJdtxYoxTUzGGvdNpKcapLONpBK': 0.0001666944490748458, 'UfBCwpMoJVhwSdRvUdCOsynyLsDVSsydRscxpEvGGdGNsUxeOMkOhdyATqTEnyEFQ': 0.0001666944490748458, 'TJHOfrnWZLIPDyRcWqrbhELbEFEDVwRFDxfzhyWjTBpqTJYDobAoKXhlFmevzWAngAjBeNPtTKoLkViPguoOOegowHYSVqrqZhvzasHoPJiMVQIiaSoFfq': 0.0001666944490748458, 'OGwOCLHXvDEQFZXUnyIVyOrZFjBWKdAtPJrxjPvzch': 0.0001666944490748458, 'JhoIsVFphRgLZMoyYIviyEObSVIvdiiSKXiijIagzLvKcyIVYTOHoFMDybKWXUlpxs': 0.0001666944490748458, 'qFNPmEwrAt': 0.0001666944490748458, 'APm': 0.0001666944490748458, 'jDspiKixUsrBhEtvGUjONDTChpbvJHCGGJqETXZAuzdAquFTlOqjiGMGiKoXCNwBxOIRGJaoGdQyemMiCiUAQvmkdJbWexozbEEYa': 0.0001666944490748458, 'mGRAjXaYapEaYKpTwtbVEGbqecJdKqaNYgMDZMwKcikdYSDXjlpLroLpvRVNETbaTscbiyMJfZBOppO': 0.0001666944490748458, 'JrMDJQALlIGZnkEhijgaFbAeBDPoNjsISPsnajRXQGkIvbBaIvqVxwWTHtPmhIxyNmydMMybKLKpAEtjeUNXvbsHPbPoVixHEGUALhavdTNjgGdcMs': 0.0001666944490748458, 'WMqnjoPDBvGjuHvgYVaowQGhmvXdBjoerhwdwuTboRnhHfLEAtrepCimtKOKvsaWWXoxQndBeDIzzI': 0.0001666944490748458, 'RMDaBldATsNRLrwyGXcDQKudJIBIEtIGUPwTQTsfkB': 0.0001666944490748458, 'SbRasHrHpmEqbrR': 0.0001666944490748458, 'JMDbySxPoSdUjvmPQ': 0.0001666944490748458, 
'YeXNfTKpCWTVvZmDCwMJtXocKSxxYiaawQlqZLAQxQiasgyfkinbXSfIAzCrVEozntPDFeCzluyMpCHclNAUAHnHigrrJeaizDW': 0.0001666944490748458, 'omstMHuiSpXZdNGRAlUdkZheaduJHBQqksspbamkKTpOVOTsTDuljWIlyXqrteHNOzeeZnvWRn': 0.0001666944490748458, 'B': 0.0006667777962993832, 'OAKaLXuHCPYPlCZgIMIyhovjESLKISQmQeHcKpzzk': 0.0001666944490748458, 'GyMogMuoncTGFNxOxIkBevKggoCBaVOjgRqjaWJGugMfKslbiwwQYNiYSapgUXTeDxDEOlvyVNxhOHbDLzlRYxxMtyTlhQpzoBaHfegDvWxhwaNPoXjCaJ': 0.0001666944490748458, 'G': 0.0001666944490748458, 'IbIJFbEGLTbheRpXVtgNOMgbRwprOAIzApKvbLsGhPg': 0.0001666944490748458, 'bGxCCRoAbCFDNNLXHUqZvWIMaKXDxeJzBsrzYKVRiIcsmHJVJAPtBaHKWHPVvuwuvELClYRfpGnWgZgmrYsuUJYsbXZdIZfXIkycTZcGgaoMt': 0.0001666944490748458, 'ctnwWVfICInXPqskbpVmJPKcaNphbIBuiXQRFlUAKxsdlohGJOVnYFUsxnLGJZETtdCyvkECSDiVBdfLlxPApklIeyaHxxjyRzrDutqjA': 0.0001666944490748458, 'MTOQPFqWHMLDVAGFGxIUgToFHSMHKRABOspECqbEoxjRVBScjfIkZODxaHcdXytVBnHXljWIKJw': 0.0001666944490748458, 'zbaTrChBpwqllgbfLbcDxqcKTcYyxogBAhQqj': 0.0001666944490748458, 'vVXmnQBfmPoylPUyfcpleNwESLgJCXTlClTDjcaQhNeEOEm': 0.0001666944490748458, 'FafIzFZJdLhoPBljnL': 0.0001666944490748458, 'JJAtZDVQWOSlWJhXpqcDATtVhjFnBxOjDKqiUs': 0.0001666944490748458, 'xlbXiPCCDcsZcJSxcIftCqadUPkVHsQBtZrIFftvvYOGGWhIRiiDWjCxjTbrTDPbGpEIKmJGQbKfsKSOAoDHMZgaqpoSMlISPtHulbF': 0.0001666944490748458, 'stY': 0.0001666944490748458, 'DjrClhSulbunLSkxCPNyondjtFCvpQKTjuHlkTFPZAcfBAZeGUfddJfkcQQcDEQrdREwPIfOusYNVD': 0.0001666944490748458, 'YPDotgJJnfxMROVnSpwNDOIMAXKHTwYmZgwkaYCGdKUTILMOyopOHICbvEYZeqSKqMwXNtUdIjzThx': 0.0001666944490748458, 'ZPwxVEHSjdzXLDipHmwncNfAeVNFMlNvQfEwNpEsaVvJkRmQbxzgwWVrVeatEXpSwdlXpivyzuygFMWHfhuehDvkcSpNoDlkAbSWTTUqXxXyLCrjQziLo': 0.0001666944490748458, 'lxHEfAvWRhjKnJuLJGgrfQGPvlPnuGmlVJWDtymILeldWQqFbibLBGI': 0.0001666944490748458, 'JsJrqevKhTTanNGsQBTDKkJZIpUzXgmdfGeADqVjHbVspgGyjDokWhlVGJwgJmgYlxZrGxpDaNfakaMTOqjCmgGCemKtHutKiTbWgeqUymAxGMGBzvHgxM': 0.0001666944490748458, 'WovxYUBZfoOLYbSdwZeHYGzHNZaMCeyGzNaHM': 0.0001666944490748458, 'ozGZBxdaBzfFeWHddqXvEvLAMYuNonPKWCiuXVNKkJPKZdSKfxNAUvbOIsjfoyGnWabvdNrhuMdfnkxyCFiXsEE': 0.0001666944490748458, 'RGUYPWyzVHBZg': 0.0001666944490748458, 'CZAKaCXoZEqOGLFItkBjanLpDgrAjMpONHVpsQtZHjYdtjHJhGmFcUXjDdTjTkZEmFaOykonOpAmnbQQFOgZHOJiWpsOZSyIGhKeUmmQbhvznwGRa': 0.0001666944490748458, 'suOhHQzdr': 0.0001666944490748458, 'Kji': 0.0001666944490748458, 'AcKChJmRWYFlXcQCfslRYmomYUgpA': 0.0001666944490748458, 'UYvvDfgBKNQXQHOVRPuBLmutobdViccLDyvmxmbncDkHdRycqSbPkDVhrqekwlZsFRJuwdAPlxwlWwmmFw': 0.0001666944490748458, 'fSZ': 0.0001666944490748458, 'DynloLnirneGUrLKRkIOqR': 0.0001666944490748458, 'ZKDgvZbomCrcAwUFlYTMUfEjcglSnIMnOkiWmDBllchCAeOvo': 0.0001666944490748458, 'ZZCjxiMQUWlFhNtjugxAzPllJKdqCCZYLOgXFmZ': 0.0001666944490748458, 'TstZNIRERmmftLeBheePLvPpG': 0.0001666944490748458, 'pNqOUEgORfsPBTjPjdlyneVVgNYDCqMYtdqBnGgjymuFusq': 0.0001666944490748458, 'nmlQttggtxDXCvjAjWjLEaJCckFYcOqvqeyuwFIGrwlsUPpyUEQXjUMSlMvwYGiSKZxZmBdQQaTSCxeqrVvGTFsNXcRdDh': 0.0001666944490748458, 'jHZTrBEpCvRxvNCBYImEQsAngMguyQExRrgHiswQkfTFlLxgMndGyCOyNyXpkxeUZbaphKgYNLdfNQKJwDlSpoJrwwTObVsBMlFenjA': 0.0001666944490748458, 'IUHyGFsVcpdKPebXohkGJvkAflnTYdbWsmZuNCnKswNZCOcpmexkpQOVIEVMZX': 0.0001666944490748458, 'irYSyHMBQDNNMIauzLXJMUHwWntTORsbDTruuAoHTyadCDcPWfiTrlqumWHAivZigcYXFJEwvVynxfoSBdBioKOuSjelutJmpCKwdRIUW': 0.0001666944490748458, 'kBzplcEwtaZTCgpTnKqKaWwMsGKpafjRhKaHQlYNCnsbi': 0.0001666944490748458, 'chqaOPLtie': 0.0001666944490748458, 'kiSBZxNSizNShkmBnIXnPzqSSPONCvhmLYEeRLCjZeohexipKJveHfSAmEnvlAdhoGvDIPwykq': 0.0001666944490748458, 
'MTQafEMtVjINFgIdUSTFGOkzKAjoqAEIbvF': 0.0001666944490748458, 'QxxmhjOdUzPPGCNkMrrzzcfnEltymgrNoxXpYQkGBNyuhHlCejaDnGjMklBvXZoytMdrNRkooknFyOSfZRxuQPQXn': 0.0001666944490748458, 'dQqELzkQKnKhoVFOl': 0.0001666944490748458, 'hgvhzFmFGdoNg': 0.0001666944490748458, 'AEyVxPGmHUdtrZfPcoTujLUrVeUOFuBmqDUJzfYLzOlfW': 0.0001666944490748458, 'CxXujanlDceQDtytluOYxJptsDbRWSZBSN': 0.0001666944490748458, 'poZfmJQhlottjfGbGDBftSpdUqdQxRawfKgxgtIMsoTVBCPzDBPDxewDNYrqIUgwpUXIzWqJCgNwbbPtLGhrVxxlOWDUac': 0.0001666944490748458, 'dnLCLkPKiZDcanbnCegxIIvBHsXJjCgkilZCWRtsXuVtfoDDuUoAVbnFOHlzPaDQYwIeQLSQunVvBwaPnHabvQFQGdEtQOlBkuZeidzmMfyvPTvYLB': 0.0001666944490748458, 'JqGGHtYKEdezFrXvIBjWqVOWeXxfiVWPLmj': 0.0001666944490748458, 'LIYKQnBbiHKNtNMlibfVpTeaUcoGObsTiOZCUQwWlQuMUcEwSroSpZyJvwgroIbAKarVgjoerjlttoZcVymkGINhwLLZSlMnDaq': 0.0001666944490748458, 'zAFBTyIpEROiTXrDAZDZxHilsEOlArKTTDQgaMyBoyQltqYBhNmoieKShoCVIfJzjpFDLaUiIzJxnVUyHbzDhEUNgYEGKYS': 0.0001666944490748458, 'XjjtgedmMHPKUsBlFbYlkKo': 0.0001666944490748458, 'EtjXBcWZbZrZUxNCtLAFpPObTrBkQIitQiI': 0.0001666944490748458, 'tvAHBiTrJg': 0.0001666944490748458, 'SMbkvKDXZshAZrubWt': 0.0001666944490748458, 'KMuFMBSGLIDjDhgaEGlKWuMBMupQWqQUjAJNzNSxAtwsGRhZfRXXBgjgKqbKkepntdVlGEhCGZzrXQRUbVrlZpbeNroxVZrNgAOLokjRBkRqF': 0.0001666944490748458, 'drorMqUsnXKYQVtCmUCmKymvjyadSzCilzyDjbavsqAUVhCxmcmpmgxlKxNFGTxMSrcTlpzzWyuKglkBnyaGxqyzjqufUtufMGEpBubXgiUsAeich': 0.0001666944490748458, 'syZTYjDWgfrhKAvsPTnCgLWkwfDjgFJNtPwchqItQKZlIxDgSHYTQPqsRdbFCJZBOwOWCoEHdWSXxZ': 0.0001666944490748458, 'gnfosBhiOEzSwDXvYdEzbaCEUjnoTnXuAadqMTdHDcMnnSKvjEAabAGsfWmBEKEsnTfEQuFPpiFuSCKJRuCKE': 0.0001666944490748458, 'fQGJNDKAmARfYUOGGASDGoPaBSbmxAhYHiklyayyTBwYBaVtqBKfJoLTDLbHCAcIfLicP': 0.0001666944490748458, 'fvTMxNdDDjtWMSLQLkTDnzUUaVWdwSjcdHfVKGXeUVSKvUpEmMsrEoeXKuOAzaFDbuDQYhiDwNnJoVngsIshukbYJaQhxqJTDsWHZsrETfwKnHf': 0.0001666944490748458, 'EkpRuGBuWsBvfePReDLjjOVnOKNgExcROilrSgSogcOSRAI': 0.0001666944490748458, 'jgsnNlpcIEXMzFqXVPkusYCfzvdrgeEguodQouHHSZozyRHNXQecxhcDzVLCDJNInZLpQEsXv': 0.0001666944490748458, 'WzMaxXNODyFFzsRClNCXHrygmQxOBXVdWBldBZaUuBNjZsVdetnvWELapXkuGUpaAAaPb': 0.0001666944490748458, 'FpYXaqpHyCMcYpxxnyfACWDjxR': 0.0001666944490748458, 'inbWVdHBMsgTEBUKPJsXGFNpmNBOJgowdKFLdCXuhvqJyBnQTpIyhCvQJQGdRfmMHBGLBVzvIsmzNxk': 0.0001666944490748458, 'vlyJROuUTcrrkYCIYDyQVrSfgzDeGjzTzffFJmWrgrfXtLuQhdqAkLYNKqeVt': 0.0001666944490748458, 'RfFRawPdzjZsqUsnfgkXmzhoDCRJVgwaSgaBWOtfJvlZoKNJRHGBbJjuuFSAsaCrbJVpLkjdjjRtzsWbIBwxkIgVtiKSnRdHReubyGflMN': 0.0001666944490748458, 'ExuYolGjJsUcluuJSYaJHXWfJIsbPJAcFTOEdZtqQEbvDoqpKBGAvNkephJXwQhGuuGVytqxEqjWCmYdwkJXXtfJIHbEImlok': 0.0001666944490748458, 'vYcJqCOFsxZUPgUZSVwRdvIwvLTjNfGjjJDLUtE': 0.0001666944490748458, 'XPDbJcVTqaARypfCaDtjuszjyYPuEkeXdTcfSOPORdCdRpCXVANrO': 0.0001666944490748458, 'ayEmPHIKSDdISpsHxVCBONiNOloeTyEUrvdCwcHrApYBHwsVAVsxqKGuYWGXaIoyNzvFamLXPTPnQqrGP': 0.0001666944490748458, 'VmVuCywdDVNnphRaovCAXQHSStCRyGMWHtMTJGQclCUxrblVDg': 0.0001666944490748458, 'seoBShEgGfAulGqlSrLRpjexHUrKJfalUcqrutdDXoDanDfgepGifKnZeVilkHMeddZvPbDnnlCqWEgqDFOpkGoIMenRGSrQnqDxH': 0.0001666944490748458, 'wQCTotgxUIopVyxwjAIrYCgQZSHiRsQgKKxsORakXFLlUAJjSysfYtftMfrCqNwhxMgrk': 0.0001666944490748458, 'UtqnsgwsJQhJXILPUPx': 0.0001666944490748458, 'bEdsDVDWclQtaMNomWSbfkdSYNjIkwbooLbJZFokgnlmehYyWCJeUGCxzLCHBnsKQ': 0.0001666944490748458, 'dRVpaTIGOFzssMaJVaEAASqwJOm': 0.0001666944490748458, 'vWOhvHBkSdfPldVqlGtoivuBGq': 0.0001666944490748458, 'rvoTNUwjijQlXJSYSPIXFuJLeeSOliEDJOjunAYhsFPuOoeIklpZLbTYQat': 0.0001666944490748458, 
'NokykwAQGbuddBRHUnHWNtVjLJexchIaHmgRByLIcgdhGINOoPlrONnYBpSclhUMdLaaDlkaePfzRhvmVoeClZgovyf': 0.0001666944490748458, 'HTxxVQIazABOuNoQMyCEkYeGDYlUUGdQMUJbtEGEAORDkAVrQTVLnyHfBFZEPKxOEvQuLFkTEPZAzudSDmLIKTEkVSGTGNtqzqPMYjFVWZa': 0.0001666944490748458, 'KLidUxdgZhHuwUAQcQSPzRlrISPpJUBcKrEJwajPcpkPFgDzQCvmUDtKdNGMGIWOCNBUWkPyBQgGysnzZGvNcPEcgnQpcxHEIRGREErvwIVpIBhnisnDY': 0.0001666944490748458, 'lBJfDHGqlEnBGdpeNAHXEpyeabOuWfSFNhkgvYzGcCRDdSoSYcyyTkVXhxVLwfzxUKp': 0.0001666944490748458, 'YBFZWSXbXWrgBhZEnkcApvmkBHojs': 0.0001666944490748458, 'iqViaaztFbPanIITzGHHJXkXvwAseuzbzbJwucWpsodrRCqeMFtJBftrJWVmHiUptPEKvEgtwyjiYhWYQSOEEprcqwZxaxafoxUbFwY': 0.0001666944490748458, 'HFybyIjhttIbMroxezOiTeghgWpRdqFkTssmwLNqgHNCBvgsDedYs': 0.0001666944490748458, 'coACDpRmSXDZtiEZTBmveRVrujvutL': 0.0001666944490748458, 'nzOvprwpFqBYgCvqVjyXmeLFejJJgTkYNdWkEpjSsqCNPsXEzzHoThzebIgPwFImQSkzmXNUbeCQvdVetbMoaEauPNFcVdyCgfVLYU': 0.0001666944490748458, 'ymVKmdlESWLLFdfMLuEtLIhHLNNvmPnqztvnLw': 0.0001666944490748458, 'uEIAVxoebMqqLEPKYirqznzBCXAMIpdZjKPwwylzNRbGIgessYdKMFFysktDgKtxAFzQmOLDYEfixQlwAPtMsDfTowvydAMfmOVcdOpoacgAnylSBHJrfEx': 0.0001666944490748458, 'zYkXWThRAkCdifKBqZfiKFBUlpevMFHgIlgfUQvmqpQmIay': 0.0001666944490748458, 'isFHZCehGDITNswOW': 0.0001666944490748458, 'xmDIVVRvRaiQaQJLvjJjzEdMIFtLlUoUVcKOIaJYCFrMCIYjHQj': 0.0001666944490748458, 'ZwTXnRvfvHgvqPqyAYW': 0.0001666944490748458, 'DzqkpCDHkgnViTcQZFR': 0.0001666944490748458, 'iKsEjLFYTKn': 0.0001666944490748458, 'vmUUXZGGyULAvdvvLKdhhWqyXqOGrdNusSFgbrqpBbTcWWhPdBifuMFUzFormTiQZstglZZnQleCRUnOObnHMl': 0.0001666944490748458, 'BVMkHKJiHKlwfqkGlCWTzzOnnuQmqVuAVabbeuApCOpgcPOqgVzbWbHacFwnAEhehfiKdbOnWJFcvspGkuazRYFVLcCNkckBsyopYTtCNYnSYQribTCxirq': 0.0001666944490748458, 'mLeTzsqSlDqCZQzWUOJjgVVJiIhliLQIilJGDjtuGyjAYCCEVUwrHFZuqmNLsXLTjHWUQWpcosRYrlKdfTLQhAOhpIntCqtLCepvXuy': 0.0001666944490748458, 'sDJveOTCaPnomFPcGASQeTJaIcQrehCcHQLYfRZpIJyFCrdtyTfoTZLHfBETVZFpmbIyFseBmjkMgglVoeORAphkV': 0.0001666944490748458, 'wfzjHcjdqbuFcxLtJjwiogRLVAHVCxaplxUWKBXsfXQOTPDzCDrkvnxlIfKmzvbIWXkYFy': 0.0001666944490748458, 'pozehIizsAZZqPxYcRzLZTFyCZZbsHK': 0.0001666944490748458, 'EyLAHEdyTUTReCgoidKrJxFzArHNJDvABXiIDyVmJNPXwTsTgnNaYSkWkydnrNjBBNHQldGcIjkYEcvtrJRwcSmFzXIiNpndHZJQKsJug': 0.0001666944490748458, 'eqTQxszMLnQRLUkCOZzEAjhGOanlMJIRYUhSRwtiqVCBCXOZSNEVpfFQYC': 0.0001666944490748458, 'yYzJvKGfzZQjPRdMuLvMTPQye': 0.0001666944490748458, 'DTRslhGRaadgvFwWvS': 0.0001666944490748458, 'LbUUFGsAfctTMhVrMbncudMHRDEuOFNcsA': 0.0001666944490748458, 'tSGrePILAbZwaeyTHDfAYxwUpMHUQfqXI': 0.0001666944490748458, 'kvwNMZfcydCDhlU': 0.0001666944490748458, 'nmVBidWEIUqQnoGsAYWNJlgeItWEsdpPyIAoZfjlKkWpfjOTgMdTJbKdyshklyTnpaJlTPaAWhqnypNMNOCUtOT': 0.0001666944490748458, 'baOyfyhyONWcqgrVidLxHuBMkRNTIzJsiHKkrySpOYBTYBVtnVEJgZMpmuR': 0.0001666944490748458, 'uCQEsfV': 0.0001666944490748458, 'sNukGRBMBXRcFEKeYULAfdpruKKTjCsnMefNrscnOhMcstyXokIReevzlVqFLXmWKCeoBZopEfQCnEFtaAuMLFvVVSu': 0.0001666944490748458, 'OXcskmFGjnCBJpEavpJwD': 0.0001666944490748458, 'tWHgYwlyZPlPFaJViLrOzJmJobLkKSywIaUHyXgQtpRXyTOQfCxPUWQdyarJjFhCEOKIBvsaojooor': 0.0001666944490748458, 'GoysecvYPrBUmYoQqtfRvJEga': 0.0001666944490748458, 'mXvNWdT': 0.0001666944490748458, 'RnVQmLyoQQTHnmSGhZwXkqyZOxkIsthXKiBkNLAwZUjFIazDedKMwz': 0.0001666944490748458, 'OUgQdShURptRlYjNiexTwHKnAuJtANTKfcQaXqPNqC': 0.0001666944490748458, 'npQQoqhmcUhtHgpvWejrkZgnukaopXstBEvhYuGwQPXnFpTEEaQzcYOGvDrlfCugkZpSecXmGrAIhLioWmUXUIjSgOhaFWrRhtMEHcqMFxqImN': 0.0001666944490748458, 
'aZLOkzZCscHulMCPEUdnSOwjQeRHgobjHNwIMRlqFzWsGXOkDVplTvUiWtdRAzIXzOtCjIjxJzAmVglQMYSjpiSzRfjFwFpoftPisThTFsr': 0.0001666944490748458, 'KyTilmLEPwuwaGjyuJwBRT': 0.0001666944490748458, 'EcrQUVevThNUMXJRtEFnfVWLeuLMdkmAEfQROatLdIbozJIqEKMbkxQsiOkPHAKpbxQqZ': 0.0001666944490748458, 'lBTGYzMdpVcJeHCtbYkVcrRBnMnnPLPTsXftkQk': 0.0001666944490748458, 'pnDSpMRfwVPESQMoKvFDKITSYSSesyoReeDpxjCBAigpJlEcyhMYVVpWqjOzsKloK': 0.0001666944490748458, 'ixDXeMvEyOVrlBIZPUJLnvMVTaYqpJCXFcIlxYoAynOlvQdOpfKXXruDQKZLtbaVKtdDkBitaWNgDCKGejMWRjvxJzoEYESwwac': 0.0001666944490748458, 'cmbsABmNeXdcyfeoMvMKbAzfIFuHKUbhslviUxDzaKftCsNfn': 0.0001666944490748458, 'TvNVAGtDvnbDuzxFTyIEYaDGjAkCZjEkigFoolGqcLvBQKJaPlMGvtiwMNTQIyXtRbkBwCmoGiLgQAdvUf': 0.0001666944490748458, 'gjumpnktenwkmIwgkfQuybIENNDcAyGbcsUSShUlHJjCqOWPm': 0.0001666944490748458, 'QltvQkcFmWckHeZniVpduMmKEFvHwUjcHLaRTLyHwhOZVYWpcHBrZFCkCeChkHkaJuEuXYoTEIWVMyX': 0.0001666944490748458, 'AVNvnYmsOslkYgAszflrEieBIVIgDDRdwCumaoWuXnvNvMEwxLfbwNRSAMOscFgzqqNdAdNubHdtCPRjcerup': 0.0001666944490748458, 'tkeFPgNePCZVgEyaGDJoSrliOhipRjMqiPgnCkgdAMFQiXjZRwAmMadUtLlutSBFKoDeIbeoGSJOwBmKfKWKvRNztHhofNptWJRyolO': 0.0001666944490748458, 'HnnuntkOXfDgRmyPkQpHmggxJYwSpdufmzFFuCXyMCBcGFEbQKuFtPf': 0.0001666944490748458, 'yEeqgUhQQcJLlKDtdeToj': 0.0001666944490748458, 'kmLqOOKKDDdXeVlVRlxsNgXlAcyTAPVILSiJzFDroqYqwuxQUTYdHaTOmOmdEiKAfwJIYugZnJheL': 0.0001666944490748458, 'CRdgsxaohmjeYjVaLlrlbzqbvrFhRfrhdsWkCbJvfVLBwNkEJABmsokczRyUcAOvUjHxwnp': 0.0001666944490748458, 'RUsWsfiBLOYjDX': 0.0001666944490748458, 'yvsdENmcMhLSXdNwPnrOGJocnDrSWhxLKSsOJqjrDGaCUWNwgaUFyHtyoyBZTbsPOwxcNllPhCnqhGvGpjIzpMWIKmUuMRlvLUpUjqdzGqexHPGYLlqSY': 0.0001666944490748458, 'iUzgRUCuYUisyfogtNeeJorUdGubBdGSulelwmDomlsVMxmcSfmnZUpmkpQzvigJsgqyFTOMkL': 0.0001666944490748458, 'djZDZITklEfdXegJcidpepoFbEbJFHYpEZKZZLE': 0.0001666944490748458, 'DjASbBwjwsnZRzhwwuOZcktJEcknTVYTBSouccyFnyGloDGNNYcvwaMBpXiOphQtrfQdXIWdPVgZcITLM': 0.0001666944490748458, 'dKgaTgJsdTJAPXYwOgEliaSxAsJQlhzJODVTNkOwepYnNTGeGMEKbrxgAcUdTuQRIsWMZPfCkfUFfkofubiHnttyJvBrgmkpHgnRmByWgu': 0.0001666944490748458, 'PhhsnlnyrquFqNbUDAiatOrtroZwcmEcnkOUNQhXwxRaNmZStlYOyWIPphmFqxPJKBrebEImWTvwAgATISIhNsZigWscWsVNvBLCjeQFjg': 0.0001666944490748458, 'VzctSRVypdJqxAkkpbmpRESPBsWjrHilPmomYxyMJzeOr': 0.0001666944490748458, 'hLlMSXxLUiycJZXbGrXfazLmprTKWhqFwBbozTFteXflXYYrgJImdSh': 0.0001666944490748458, 'IHKfouggwivkUsrJfMJFnhLugNZroXafdOPDAkVnUCLOOqDYANWpRxpsEbIOOxkmwvWcrRhkVtTYUpzFiYUnYubEDTBmjFoxZNyzKetOpvhYFYOBHoTfWIb': 0.0001666944490748458, 'uoQHTmfyYQWuScRlIIKXxHTCgrUHSSJHurOMQwpRobBIwzZaxXsgWEPdlFxDCpPhRFhciwoiFdtkTtgeFGgCLvTxymaAGRqOAMLjBaNuOj': 0.0001666944490748458, 'IXASVbxFhfTWFSqosYpaUO': 0.0001666944490748458, 'iJBUmHNMUJQWNYuKJpbfMmExguBgqislcYNUzLLPCPppMxJkuxVeciwYWHizxGFyrfBnuJoyNogGpkZlpFCFGaJxfzgoo': 0.0001666944490748458, 'GffYJqznCnaLyBkKtnrZyKmXiW': 0.0001666944490748458, 'BQO': 0.0001666944490748458, 'LuMtGZEyABJYSjTFtaQHICuSudFKUAi': 0.0001666944490748458, 'AdODwypjZToNPzMQNEQeKYVjnTUVKBQqtm': 0.0001666944490748458, 'SBODMdniSuQDBxaggNptGCKvMTYmIETFklTJRawQbgGVvkWztJTtsZIRvkpRAAUoZFwfNAeTCqvCgBdzlWZlMpWhzZzQZpSbJLNHXOoFNSa': 0.0001666944490748458, 'dnUuWdELIJBBgNXkeVXGngTcaiMcoQjVVaOzaTJUo': 0.0001666944490748458, 'srA': 0.0001666944490748458, 'hiAxTxUpbt': 0.0001666944490748458, 'zcaPlDdUYyELlkXofRUkZANoTnfuKTINMMXPUwkksQjMIGKDBAgWYmOLqyxAcXVSsfIltvoQcHbrgNwpIDaeOzcmSkMlDskRLePWaGVPFabWfSO': 0.0001666944490748458, 'cvMqXUlIUUbpTnhDZmhqoFMahXGKaozupZIWDdPbMfBtMXxjkGxAMLFnirOVhlMBiQzDNDsPF': 0.0001666944490748458, 
'vEYvynACRCQKZyLHhNeZaRyHRJISqm': 0.0001666944490748458, 'bmTmivfwevqQYEFYjfKsP': 0.0001666944490748458, 'tcNhOUonOYxVhLQGRrrPvebSrnSggMAmwejjp': 0.0001666944490748458, 'NckKjFZlEIPWvipOHykxKIyrTrhlrozGFVxasEvEURzkzfLZqEDyURSmMvHLgbjmAFAnbxKUQEGvcpOYvlCEcZnNQgWnuBzGMfiiGtOeGaGKVADoQS': 0.0001666944490748458, 'svjSCMalQfswonlnpEkizBHlUEDhQMJVaZyYYbMjJfRELIGrjTFFxOxkZxbwcepwtYHxUvDhZndeqZEeVszfLnrzFANViTsITeGaalNGykXabbFtMmfLeKU': 0.0001666944490748458, 'qVFwncfsCJvpDZVZkGogiIAwgLtBMDeGyueeuDFmsQqQSrMsUqNVuooLPPXLGTjPJSEWPHXoPSKwOuesjmrNvVoiHwxcmqGWPhKObIqreoRLMokjJpTqqn': 0.0001666944490748458, 'EcuiMRZsdLItycfwIlHvVvwdLpGaZihLheLGQRiHhYFOeurJZjRgnGhPXqDZgLkGIr': 0.0001666944490748458, 'TazmeZsurqfDCYwvOHFmQLOyvImMZwvgDb': 0.0001666944490748458, 'GgWEZgwlpGaMuKVWPCAJgfCxJEnritcFXrJeXmJcLYjNGAYYXVEbiXNatLzbaFZqjzilaftCGXKUdmHzrRXsfBcxRbihXyaWdhzJLHVtFdtHHQHOD': 0.0001666944490748458, 'NIQhmdttkDKAHoIdBgpBtBXEkMFRGaqBxHToURCTaVOCwupRLNxArLtdpGolfGkwZwRnuFTWsqqTLTZRdWrCcnoEGbOaLsIhIJZWAUfRflvpX': 0.0001666944490748458, 'TMwRUyAslUiMPqvMJt': 0.0001666944490748458, 'jQcjzlbUIJVsYtvwcQKaZOSDCkkAbeHEkvAhyHCabgX': 0.0001666944490748458, 'YSUoWQGueWoghRmKSPdccXiWoTu': 0.0001666944490748458, 'BISDmylHjQnQxnUDwppzjycCYpaPXfZKala': 0.0001666944490748458, 'DhpCqBzAONkWXGroSUbLVqOpIVkxlzOZHEWxyTakHpZBqDZIcxKuwTvgEoyKePgkOUASkewxHyt': 0.0001666944490748458, 'YUNihUevTYMIXenQGDTtvhDXyWrmsQDCsWrAKmBapgKUuCCFguwfMfACXWzdbhbshLhrsNHpqegeEzgirjfJhhKAgolYsQEDPYeEqXZuqFOxNLpJmbaYo': 0.0001666944490748458, 'eatppjyHNBHlhUSFKmBhTIRkhjrwnShRAdWlgqpAThHqbwPzXFQJZgDhIgucogb': 0.0001666944490748458, 'ZumPpUdAGhKasDMSUTKutbDbHXeXBsERHqKleUASLgNNKVlkKcZQABYVAJKKNoKtlwfoKrsZaPzTIJofsxSfSxYsYBmKLR': 0.0001666944490748458, 'JHCODCpbmAgBREEnxeKkcCSswrtYebIKJWCItmdcjVGtM': 0.0001666944490748458, 'wKUGyAyqsjBEHmxQYchBdDyVliEWaptDyXm': 0.0001666944490748458, 'DMdWmPeoTMLFHBwQNVGQjmLpfvNqioOegXGgtSjvuuLaURK': 0.0001666944490748458, 'pFeYMtWHUcIENCrTYFgZDHYiiJVNLHqRJzgTuOAOTtNgaGVGlcoFNIQXmDVlCrjfzDjFJcQRCBZvjYYpRjfXwesiuA': 0.0001666944490748458, 'XcVzdvAtbQyghgUGoHGGn': 0.0001666944490748458, 'oYMBDUdAxThQfXDQTGDGRuyamaRnYiAsprZiummPCJlzIdadbPjfxhSHsKiqtzDHPQXnFkantVLnQQksxEwis': 0.0001666944490748458, 'IcptqmrQiKsqnkDIzRQAuRUuAZYsxOJIpiULzBSOXfmXSWUfhaMDgZdjyJZtwzBdRFYFoMsefBODwy': 0.0001666944490748458, 'Te': 0.0001666944490748458, 'NTkmkMqMUZKBoFmZlkwYBPpxgCYHkQhTiONnInMKslFyLtXPSgttkURshWfULnfhQqyeJIuTRrAmiEmOqoDfWf': 0.0001666944490748458, 'ZrvTMCnbpcKdgHOjmMPFpopcUqbWVcMlMmnhnsluhwxBDAgyHDPZowBGRcNVIeHAyJDPjNHAOtagSd': 0.0001666944490748458, 'uvFnlkRvKyKZcTrlHtnBAqkzHrTNCATtjnPuX': 0.0001666944490748458, 'EllMKEfsFSYDxRFJNWQNAwmkAoCqRlAYpAJATLKDUwLmJ': 0.0001666944490748458, 'vfEuqPqhBmzTSlyyMTmzwsGwYLrEeDvUzsaYmPnIoyByraYUDgYtoknmlvldKGkctZAJJfUbblNzEPZqnIbTbQyygvbeoaXYlnQFUIpDBQOMWHcmcKuIsUE': 0.0001666944490748458, 'OxKyBKyBruXlLgllWjMxniuOKBGBTtaJNjPeeszCCsGmEUremEyPBNSxUItZzYvIGpVSskeXTzfFQGhxrhqsuYivWLMjSgOohEAfdwM': 0.0001666944490748458, 'yMllEGrRiIwmvLWGwGCimIxyMGMgxOSotSxJNjdrNYwxZONjaRziy': 0.0001666944490748458, 'fUzeHaRNVLuECPWvpgTWwWoIjmLazsccqoKyAZqSxUeKMFVCCWmEnkwTUHsGNfMkpacucBSzzZsnwC': 0.0001666944490748458, 'CBtMssiyNjwpxQwVSWOCPSTIhWNpoWUoMXDGaZLEiujpwDktmWfxHxWCpAMpFMYSqEUVulDrRBHlHOOdNbS': 0.0001666944490748458, 'oZqbMVcRgijdBDYsHPLQxWwHNiIbRgGdkZvbQanEqoprIpKabMtAzVWvpWjaePYlWDWCTpzaEbexgYCOErMVDzzqCftwNbG': 0.0001666944490748458, 'qGQTFkvdkhqGwxHwJxkFFwJrgvPHNTnQxjeYmdIcUe': 0.0001666944490748458, 
'oxdEMUoUQwOqQaajfawzekpYbYLGgzHOeOtZpFujtPXyqnihmVxaOehTxewJvoNlVkshFvXysMQKdCHUacFLeTNHiFYjnrdeAzgNWmHWEyrLtPGTsSD': 0.0001666944490748458, 'OviWwzUBTggtqTqAuFRvucbcTWgFUmWXTLoPuWvKQDYoDfbyuq': 0.0001666944490748458, 'JLjddCCGQEILQbauUuAsuLcwMKSJresFCVaSBkKOByMAXXIzCduEJBaJIBDtWefNDoOWQMPWhVBnokVqUuDbh': 0.0001666944490748458, 'CvzMcVHJxiYCoDJWtKomlzfpya': 0.0001666944490748458, 'uRkSwEGrJzArbjXenlSudJFjlpeKtfnxtPJ': 0.0001666944490748458, 'qRhUvCDIgUVSpbZwpyJuGiwWeIcRhopbOgzbCyrNRxreasdwPL': 0.0001666944490748458, 'pMzUUKuUMxGYkAAkOSzcjvPIXwQkJFksUuetYyMdIomgeoROtFM': 0.0001666944490748458, 'uthuIcGeZJmzWpcazKOMRPfKhxvIueoDghHuYJQuyYYwszbUnqZQv': 0.0001666944490748458, 'eKsrNwgnCxjPlZpqeFNlMJLfHhCPEdgVE': 0.0001666944490748458, 'BzkafMitdOusCFJvqAhkbAtVSovysrNLAoBRlBYMwblYbYDjDmkuHvEmPlfpsSgSzG': 0.0001666944490748458, 'xJddGzKvCJlmClmBvmInBFOEYGMKTvqjPYRcnxsaHnoZXBlmJPocRjHpTbcMV': 0.0001666944490748458, 'KEwxufDILAoXPdPoZScCfKyHAXVKKoGBKpKuDbuQEQwttGXMLieKLxshOaRELYOoNvcHOFOyGSvwOjJufoUiReoysAwTKjLDUnFMcWJXyiWeJ': 0.0001666944490748458, 'DAffQPjqzHrwtCTMLeMDylcFcUdDaKYTpJsOElgTqrSdTHRCinKAblgKikKxjHlNRjatQtSKUNBOPTsRuDhIgKYxiLTsv': 0.0001666944490748458, 'dQyRfYDDkcUXERveGClDHruVTYTbfMJupYsZOnMpBQHWCsXDxeuvgCtlsFHwpvAmDqTKedXGWBtaNOUlsvPwXwi': 0.0001666944490748458, 'nMVanYXtqSOVBfGNXNBKJFpJqjZAIfvCDbGmBlpGVrznUyVtbogMQtMbqAtaAkIQdqaUeNFFngWIzKFNYxpkHAzNNZlEXkzHtPP': 0.0001666944490748458, 'mWPLDFOOmFHIAPawMpdDFTgwBlRjHKGlDpoqzWqGEuc': 0.0001666944490748458, 'hqlHHjUQfOtrglKVVJcIXwZiMbOFUhaGMGByknbmAbkSQPbdIpHNTADFxiFFOBQuFyoDmqSbOMSupOmoAARgWXcMmlRvwWsFpZ': 0.0001666944490748458, 'ILhIWEyPuVnpTyjlrPxUsbGQmis': 0.0001666944490748458, 'EvAfeYzrNSZYorPcKavBXua': 0.0001666944490748458, 'SfcIcISbcDzbLgauhPwriHL': 0.0001666944490748458, 'JdZnv': 0.0001666944490748458, 'RZeDozW': 0.0001666944490748458, 'ljInrMuJlZfLGlWrIBqVGoCpzEEZnGVqXtXlWtzYYVtgZSZccUHYHuslxZAhuqMmiwnXVWmFMeMhUEBCOBfaIs': 0.0001666944490748458, 'MTKLyaAhpunSWhlSEZyrIOztbVqtqdWshR': 0.0001666944490748458, 'gIuNIZpMCkGatEnQgXHfKUImvfQFQtXeqTZQHwxyAEMhDZRpleedoPDkfPzoxSAiZjoRYTfnpyGNTSRUn': 0.0001666944490748458, 'FSZMbHncRCVRilrFRuxnrTyzZGzJXLZEltkAvCWmwgmttKbGdQNpFyWFjDZpojqBPkHPain': 0.0001666944490748458, 'Ay': 0.0001666944490748458, 'NoPcrfwIfrgNXYnrceiZjWYvQcKJQnuuAFTrKyoqKetKNpVsOyTfFdKFdPloNzAhbIIltvRXRggkKvmaJt': 0.0001666944490748458, 'TppOgBjtifwNgAEFLkGoCAIrFfEzN': 0.0001666944490748458, 'fdEzGtHOgStxbSQBpGfqKnqzcokkllbTElEKSghUtZVqvOFapeydKnJkFNixmdMzS': 0.0001666944490748458, 'SOWVDC': 0.0001666944490748458, 'upFVEEtbvMLNOhfWfVxVBjy': 0.0001666944490748458, 'KoYQBBzKMeQfsckNyJpnPcBWhcYhQfOOMqzNDvNXVvpQUkEFtYKkctKdwSSdbynjcwodKWTJwgTnGoAoEfMBtau': 0.0001666944490748458, 'smofNSKmRQHRCqftlOWbyYxPeIagYyrFzWTVOYLTD': 0.0001666944490748458, 'NIzocmcoHFNwZTkyYLfIHoQAchaNpRivlHKTDMqoMEbLzJLOzOadXAvawvxnsqv': 0.0001666944490748458, 'QOOZDilvXofQcstINGLHTyBjLKVidKjEEpTIcRDftWL': 0.0001666944490748458, 'smRGCGTHKAHQBbBRFSWdXNHsfRtpZstmZRcZgdyFDWODeLYPekqRZPLgxZLCwlBeRvrqeLFRUNYAArZofAJOLWpYKLkFQmrGNy': 0.0001666944490748458, 'ntQWgPoEdPIPpTUGPBzBYiPeUcXvIRpojbKPWgWEiAQVxIJgQBZXjuQjtGfxLGnkEZUVxEmmxOWOlMSLBrbblBIQvdVrWKTMpCljqAQTCXSbuJNTr': 0.0001666944490748458, 'XCSVErcaVVyuomYHdfxNEBGuvaMCaRIfEbhbIThKktpkCgldoBpLNyzxxNVgUKXowFHRwlsYhbSDVWlvyXWjvCydmNAhnD': 0.0001666944490748458, 'KUttbrrVUpNudayHOlKszIMFbhddzntSShdGZiJHmVKcFPDKbrHsySVKjPYsEjfRkUoskpahCsuYlYgJlqXbpzPoscaMMM': 0.0001666944490748458, 'OtsbkbShloftjhXXclFxPSBqFqeQnIUxZQfUCGGnspHvEFzvNYFCUzsuTUxQSWbUVcXTHPQmdvWjCODMtoLUxVNVehKHJLWOV': 0.0001666944490748458, 
'kFPqzumBAtnXenTETOGqXkyegxuhxlMcwHXUZaaTCLjwdUpoByUvjuqrMbZWPqINLGWKhXFndQQQDuxGDPQgszCmZyiLDMnCQJ': 0.0001666944490748458, 'mVswLDovsZKcdxQJh': 0.0001666944490748458, 'KaTXkgNYiZvlVENCzYBxkPAyAJRfPKVeoktTQExZSKwaKKvaJzMuxQuqzRqlfqDNRIBlBGKTmUpsYPmtdblSGZMULlKTrEDOQaMcdoCHsVrCfRRVUpVln': 0.0001666944490748458, 'TeDTybvrqJcYvVuKHyVvzlUYQBsoqLQGvXg': 0.0001666944490748458, 'eMMQVPltatlhbI': 0.0001666944490748458, 'JsOdqhddsLJrACbxahYPDYDSNueZYR': 0.0001666944490748458, 'tLOHMRaOoesnynfFDbocTuxmyrabnahFiMZcIubMakUkRDKXbNESRpRvbjUielhGh': 0.0001666944490748458, 'lLmGCgBVIharTGJtBUoVexKpviMxiWeAccjcCLIOzWfHqaTSGhYRI': 0.0001666944490748458, 'MJuekSXYIGxGgbKLxmTYLqiXsvIXoGzjinEbAyhuZfmFxflfdYbYiytFmsZrVCtPiIVgrXOKXhhXLsxszUQkEYGKdiOxNxIZvRdWbKxKTaC': 0.0001666944490748458, 'pEYixZkKAJmwFfBkUUWWwQJwgNEFxTw': 0.0001666944490748458, 'jYTzYf': 0.0001666944490748458, 'JJHUbqdfrsTJnAKufCDbexbyLVGysPermoLHuRjrCeFGIFz': 0.0001666944490748458, 'KmVQBkTmdNYvgEMOzGCtMycZREwmGnpWhTersWLbnUcUixrPVdJuQGunZhxGVePSFSEtNAtGpHZUpRKlX': 0.0001666944490748458, 'ewHkWNDAglHqCa': 0.0001666944490748458, 'etYCUgHTewipgenZqouWaFPHbJJNxAakoeCvzqMIdtcBsjepzmgtCwdCRrTOHYCYNXELaJDMMCMyqDrtMxzzOIPaMkgMbAQrwwFJll': 0.0001666944490748458, 'pHxHncnvVaAlsJPrpVyKmWENNriBkYNfeERdByLqDIcbzI': 0.0001666944490748458, 'MjQJbIBcDEiQjAiwbQeGEOjrGMRxJlaJNCTfdgvoKCSOIlYVDmmgArbfVGQLKAGMbCbBSZPYUWmZopkiuVBOhsTbXJmnzoVMeJY': 0.0001666944490748458, 'iXVDbUouCvfXtzWWtWPbHeDQbhcLGKkasweIGpK': 0.0001666944490748458, 'zXgAHMqOOWXHHjJMuSjIVDFtQHKgIVXqnyChtzqoKoWwqGSqWBQPMrTFRGRKzsGXrDIOSqfUKLpOCLnuhChLzYmwMUQcPdZpdVXBA': 0.0001666944490748458, 'BitxPtxtrPycavqQXcajLtsWlDEzLKfAEZdiONoNHgVADkmhCLIrOCkXAztoqrNNDF': 0.0001666944490748458, 'AhyRnoMFlYARgThOQvyHHfSjpbdTftZkoOAnLVKSyPDkeBwnulmlaBqopfHzLzZUGAsNHDrMirQyaP': 0.0001666944490748458, 'DOLmXIjLzwAlFVxOMsiyolhZqbHtjaoDDyhilMzDYejsukieQpjDNiKGTwWzeTcqtLVsxavoLXeHPVMAhxk': 0.0001666944490748458, 'xyFfMdkwVcxLelSHwdbQmYAeTpTmPVnJMyuMxPHkedeJEDRkTOkPeinmwQBYXhG': 0.0001666944490748458, 'ozeLzTJFikRkE': 0.0001666944490748458, 'sjA': 0.0001666944490748458, 'lDusvnwYoscMbSejOPEnRYiMIWLXJtHMMvQzlprqvZoblqdpxTVKZaZDvqXjHqPGotxeSeJvOAIJyNSjxlxlCjhSKIPnzWcTJgkfpSspxhAqBB': 0.0001666944490748458, 'gRlHIVCImEDQWyxsCZbiOeBEBPViUSLIPuYToGIdWgdZjggEWkMFu': 0.0001666944490748458, 'LIdxCeBuxIOmUqmvAYBXZvg': 0.0001666944490748458, 'BIVPuVxUmeSKwOiUoJmNyCLGFijddylqmPLzdiqLxhiPWwmrEfmMnsZZosOHZJJMeVmfoXkhsUnlbfjlCudsYatQLzFypwQcx': 0.0001666944490748458, 'baspLEdugGwOfqyXXPITcSqMCWxNRCcUEZUzuSfVSOuHuFldoKBHWtqQEsnFlpJkzHssukhxeGM': 0.0001666944490748458, 'KHacOPkKpMXfEFOsRvdMePEPudLjtgl': 0.0001666944490748458, 'qjV': 0.0001666944490748458, 'UTVftNALpbEVHXqhTByhQdTTvpwvMIMaSpSXpucvguajllcsRNCuEznymdgNQTxeLcLQjANMuwiBEzBuel': 0.0001666944490748458, 'ULriEkHaGpELPTBscOMpHLWGWMwMEuAzvblezyHamWmYwhpdXBbhSfIvWVJSCSBNgLacfzQANVoBEluLrMK': 0.0001666944490748458, 'Uz': 0.0001666944490748458, 'ezddRdPDxhICkjFFuJTxHhnVAHrCcRmUWWdwLFTOQmbdJvNUNfpWlGZBitsMDKRtvRVaaqcTQbHTwdwZhkHoyFGmpOYMetzKt': 0.0001666944490748458, 'GbYtvunTYanAkudYzeOsybWOzOCGo': 0.0001666944490748458, 'fErunNkOaocOelbRZQPRPpOPDTymSSvULEgqPmDzXCbXNfjwRQbUeqPeLFyUCudIxrCxwufJGKCSzcbICjNvqMTfZZL': 0.0001666944490748458, 'kHS': 0.0001666944490748458, 'qoeGFRcjERXoGKjhpAwKhOqXbNcJiAcOlMYnQjQqbZjRCUunpKJRGKDPVfEAJpJ': 0.0001666944490748458, 'rZexZWd': 0.0001666944490748458, 'VkQQUojNLbWVXELkEtEpUHpcukvoturgVktJEXFgBuVWIFtsJplgUFLtWHdblLqDzTiRMleTnXHNYnatejlBeZuyOkCOkjCvORZsKpVivzLBGM': 0.0001666944490748458, 
'qdkoTIUkmQeBxFJDTHxlxDNAdFJUYuTlmpmUUrLbISmTawkBKyaQJKexhDiolon': 0.0001666944490748458, 'UcyBLOzSMlVaFccVuushlLjUQzJXLShLDqZmOsrgiBoHAEhqNvnAEH': 0.0001666944490748458, 'zsbQUAfMjENPPWotyWVUSCYVQZdloFoZvBMHcvHufciaBmfUcfgJQIIvtlqnnwCOfBqqaiYTENTKSUeYdH': 0.0001666944490748458, 'YRkppQifQswuQUkWseUnDehvmthWAmTETvnCwCCrQvSlbwGMfaniFTAcDejiyatXZHqYUqeaXtiRZovyBSEzZEdZn': 0.0001666944490748458, 'SSackUQhQ': 0.0001666944490748458, 'QIYhJZFTKSpZUjlJnRWaVzPzVbUIueyVEOyuRvrzgmboTVFvsTWhoNwhRkCEZDeuDdKZHKswCR': 0.0001666944490748458, 'qxCcKwiEwrefLLHxEFJVuanwwtNAYgRfXvUKPi': 0.0001666944490748458, 'KsnEDzeCYupfOJKpMAXbkm': 0.0001666944490748458, 'lAYkFZOJGklPQYmBwYaHPiVFApefGzYpEhUVqxrWr': 0.0001666944490748458, 'gzCoqgBhYKiwztLgEuzzlqJAuBRApPLhRIyGmneoOraRwxLNoalGnsgShzIwnKdnOXr': 0.0001666944490748458, 'wqXyRlWLweaTQbmCIVhufAsaHTteSeWBjKWkbkUxBqELHFWKECxasIjeceVcmoFPGovPwfeznOvAClpvDmG': 0.0001666944490748458, 'q': 0.0006667777962993832, 'rVQwCzfLLvwGDjDVqQHtmNnjuC': 0.0001666944490748458, 'ccQbNqlwwglHwuLgXTRmstyyqXAQSupZbM': 0.0001666944490748458, 'treibHLoIZOzghYfK': 0.0001666944490748458, 'qbgBLNfqbRKzFMEWismndUHrKGcbBoqbaXCjKrkZXSTOgPejLskXzVGepbrdOUWovFvoYhCGzloXaQlDZHRhxETkRq': 0.0001666944490748458, 'irJWeiVExqIEIp': 0.0001666944490748458, 'TWkBewdpiJZOEqVeCRFLAPFWcZhLogwoZPAlOiIayVXlCFxeIhDJVboxxNTJiIMJDWSsDWi': 0.0001666944490748458, 'VdZronjHbSFtnfEikgALkGTVAGRaRWfmgNFzUpQvMMyGTchOeCEyuwfHaqEYXPkTtXLaFzPLSggwCtloB': 0.0001666944490748458, 'CzlgBitXdPM': 0.0001666944490748458, 'fHFXPVwNbHeBFEQnSHU': 0.0001666944490748458, 'UwRhvAKTwhtsHJLCyLKdbGRdTDfeDfWMzowLFaBEgqfGnEeMjKV': 0.0001666944490748458, 'cYhNhaMBynemNaKUmlCIQXAPIehAOhyDhmYJhFYjzUJRjveBQxUsOxTDiYOofXjPwefjaAEqBxmRcMkqlzMUhwQjpGmBvpwIIIbsKptcGowsNLxWJfeIdG': 0.0001666944490748458, 'IEtcsSbfbbHhdtWPpwLlLOjrajmZmppIzckhhHNEjaQMqtpPiNE': 0.0001666944490748458, 'fuTzgryshePqGolPTRyWLeNNE': 0.0001666944490748458, 'lvmYEWZsUMmrLXkwusSdbWWMXKZXVLaZiWwaAYkYOTMgdTKCztDHjCSwymTyABtnazwCsSfWHfvqZneFRRWhHnYLWA': 0.0001666944490748458, 'xvjpRwCjtVlfDEdmzjJlZJJtLhn': 0.0001666944490748458, 'FeBtcrrPNjVCXYojgfD': 0.0001666944490748458, 'jsKDbDakwQaWKCIdTFBbAMlJxgvPQjIipppXNnAfThonboeLTrmPZXsQTiRStmLMdBDMVhSvBjNRTcOTPFJAzTchUug': 0.0001666944490748458, 'ZhACGeV': 0.0001666944490748458, 'LvxqAcCSYeDiEOaWwekEqubjxKBHUKKTZZZhATWbIrkOKpFqFkffnWLNeHkAOochcprqFtCJPrLYyFnSOCEVeZzBqXDWLKsSHlwEa': 0.0001666944490748458, 'As': 0.0001666944490748458, 'plYYynvuBBEYAcHRvZxvnCNgydbkEnvFfoYJneFZPnDThwpsomAfpqUPLBwGfDrHr': 0.0001666944490748458, 'DbwOYXrYyNBFCzCwLkqxTcLDahESndnJGZxNAXUmcrvohSAzgPMBgghIaSwYrgrrblRcJnQVCWdJPdRBeYDGSPeCbxrsie': 0.0001666944490748458, 'BXTQEwXeomcgBtgbunfWupsnQOafqgNOykljUJaDThBcoFzoUmqsCJQnzdZYLzpitaYprCCXVtusSYbdZsWVpjJeaeGYFCJsq': 0.0001666944490748458, 'NVhRugmJBLPRw': 0.0001666944490748458, 'kgFXTPTGBDFgpaqgChGsSNuOCxiyqow': 0.0001666944490748458, 'o': 0.0003333888981496916, 'eXNhaTxSLZadwiGKjIXKJM': 0.0001666944490748458, 'tkAtIrcWtvhUwMTzvcbFfbeXsaTSPDoJJENiyEPXWrXJQKtMbBWUIlGHZZlGGAnuwtWpPBaLWFOMumzDRgDfTMhcDLhvg': 0.0001666944490748458, 'jAHWEvwiQpQPxQDEpJOrbHUfswxoUmAXWHZtUvrnNuKWVLKhnnbGFoIEDGVtFBTgLJkaSjMgRKlHCAtPmbvKrBkMDquJdVejnUYCgHyNqTx': 0.0001666944490748458, 'hHlcRJNqwTrTAAZELJJwDSROYlkHDcDbTSbnSPZUpTjmNMVQbDInWwUzSvKRlzDluVxVejYlFkYPnNFGmwyxZurWZjmGgGiEVKwzGeoMfhctcP': 0.0001666944490748458, 'KFaWBpBEKt': 0.0001666944490748458, 'FvPjjUKhogmoPhCmkhkyoZOcC': 0.0001666944490748458, 'JizhxGuVdInJoLUUmNRLAmJyjHhiEBwTBwOUcOmHgRfwZrebwgmmIQXnAfpTfJekZFnzuBovfuieqToxfVeAxqolrdLWufwAJplVuMOnpYLjpWkgEY': 
0.0001666944490748458, 'kYiWTAILS': 0.0001666944490748458, 'ldQQWBHzYzwPAKyFSkDTbSdzDXwoTbPHMWmObLlglZNwhSHaPbuVFeKSUZrjDWiGVFCseAdrHSbaCMaCpfSshAnxwsQrRgJmnlVAicyFSmSKptxAw': 0.0001666944490748458, 'yHqPnaidmoYiOg': 0.0001666944490748458, 'unOnjZ': 0.0001666944490748458, 'hMJXRMhXtJBiWonArFGuYDvWqFVAwNsBqKooGamSRh': 0.0001666944490748458, 'sqpcOukiJusqPemSFjQVWnIYdgJszlwSdalfutVHltxESeDiOKgQINYivCeHWfoHIIoysmBvOpEpaStfjnlXUW': 0.0001666944490748458, 'NcZvhftsTehkELHwEwQNkoefcLVUcqFzr': 0.0001666944490748458, 'IGHqXWUiAYPchPuWqShPjSVVNQRabhbUShRzgdyxHSicDOuErWcUxqoUruohxmHfrsTVgeukNCheNHEvOtDniKQwZSfgozvBwD': 0.0001666944490748458, 'KRltCVVCPGjehmwZVitdGJQbAwcKHdlMoNRIYiDBCnVRHhfzjMaoVEHXaZXdFnmRIoTMzAPvftRGpVIUQlxzHpNJWvf': 0.0001666944490748458, 'aZWLnqoitCEZaWDJeDAVWUEDElZhvMzsjFPJOCFjySMlqAmNbGWzCivTaCsFHdvmgTvEcbCVLculGGWHQolrqoYduQxaaRfDfx': 0.0001666944490748458, 'QoNklxVLqhLYWvQdcAblExQIdDQPxJixCueelyCfdMqSAR': 0.0001666944490748458, 'zBvNzDHrhgnKFXOHNVZafrxunZpQKTsgyINEnpTuWJVirdlhGtcovGWQMzDkKbsoKzjcZjH': 0.0001666944490748458, 'HU': 0.0001666944490748458, 'nAmVGlBSOxTHJl': 0.0001666944490748458, 'EtpcrxeVytdjlrwvdzSjtgsekTzYLoaMYIcSkstFZQvTqufOItdOXniqXFPJYRjBYFzBNpdfdwclnOSGRyEFfCbfczSucubqzkmOBsuhSGQiXJe': 0.0001666944490748458, 'FgLEiGuEfUjTldeAThvpQrcxEdVdUMnzuRZaDfhHyDrpKalfARKIrxnVHdLAhaDCFChVdsKFgxrwqrnezoeupqWmkSNUnWOFvmjUDHCfTLFhmCM': 0.0001666944490748458, 'IPPCNzheEHePdMSMosKSIBgcJDLpDCBcyAJjvDXYrvhZGAFvdRmQUHdUuJWBqKAqSZSSCTtahYfgwSzrxUsFmPLafEJiSzUNbWcMwVJYIhrLCyx': 0.0001666944490748458, 'COlEduqarumzkxhMWWXCXkQSZMaUEvXgNVAbuiXbTWbcrhsCOjOyexYFDluiejnQaHGqny': 0.0001666944490748458, 'FtLRdlwTIMwbOFphynXpKwONZnyKqsfIhXvuBTHlqrdSGEkxZbvYRUZjLZoVATrFtNSRbbptNduSPvhyUvgbxIdCtLgyUEgamdgwflT': 0.0001666944490748458, 'IQoHJHxnUWIaGyphAoQYcTRVPvpMQGkWVGRABvkXXJKubEomTaBSlTbUwkXTXFLQhXkRhpsAQMzpvKZqxbyYaFbVpRqpfHgnNIIhS': 0.0001666944490748458, 'uPAxMgjgbQSlQUJzUIIrGVqHsCmnPwmlGvrTvnYFiwzSUYAOavQkFEteHCGdfbIlxJxXgLsSgfDFOWAoEJITxyNrPPCxrruQJ': 0.0001666944490748458, 'KSFXcstVsLFqyXSGRXUfOxXyhPvdjSeyxadwjEubLBIhjtMJPdtJyBVmndKKyRbgEVjASjranWiiExZdwTpcfcf': 0.0001666944490748458, 'CLZbfnxzADoREEtkdnFVzGEVpsPFRizFbvhOlWllPpsOorqulxhPhnlKNQrDE': 0.0001666944490748458, 'rRrFWSKaJpirnCqWkmmtfEwESzqSNRV': 0.0001666944490748458, 'hyYQzOoRWVWIfZCYnHJSKXgexKUcVjscmEMHYtJgIMFrFKqBtJmYjcoAjdURrZTllHDTizAByxPvSgDgBYUmZIxgQAFqvNr': 0.0001666944490748458, 'MucAVXAmnGhbALJliRzMfDJLqXnZTWmmuoLEJfBAHRgiOzPbfgaZpswodbbVwCISIaWiV': 0.0001666944490748458, 'BqRTisjQyYRcETDLPvZepZRfdGcCtymEjnriWBlBUdAOXpvyLuDOnJAVRwfOZcDRjypXGrEevzbwYaUSQna': 0.0001666944490748458, 'cwaMcJMXsuISjqrPmdKEvxCoHRIERnVbTbXsVuUHxTtNKNCJNQjzHDDwjdDLAcXtwmFAMbUVSBPSYYFmuNOSOwqpPvxxfGlZBeTKlZ': 0.0001666944490748458, 'GvxwBzFomZxZHpzMgysaMOkQEYQaFghSKGRKHnfDPlsqeKaupBF': 0.0001666944490748458, 'NBLolvOyBiHyzQrIofmmxVWdcpzHArqyt': 0.0001666944490748458, 'JnoABokpRlEDByaqKVJazmHhowITjmDZPZticGxMmIPQHOdHDDnkuBwDeLtuYagerDTJiU': 0.0001666944490748458, 'lnutvdyUDFRoWQGAlZcQGUURqDXtbxUBVmfKXNtrxKjKQtmQGsIbDaazOhYbahSTGxYliigtxLkMDdhoUxZ': 0.0001666944490748458, 'ZoigRwioyedISSUygccPWdQoVvdfiIfMnSxWtdmpYrBTm': 0.0001666944490748458, 'bhhAXAhddmYmginjDVcfmiQUFgZwmvlRTEvpjzPOsGjNFYHPmvAWycabnEMvUdxwJBSiqtQQbDvKWeOzBpfMYQpAmqWycdbcUPkprmiux': 0.0001666944490748458, 'yhrVCBTfRjoDTIIVspQgExXpZtc': 0.0001666944490748458, 'pXUFAzjUNKdywdjEzmgCURRhkBjgSdtQUnVtRkkgeBE': 0.0001666944490748458, 'mekZJWEGnTrLMcYaRZifRNaFkwJkQXgqanQUzgb': 0.0001666944490748458, 'dgdogGWMyXwUOBDUwRhdDdicFUIDjHwRZYyiXyDBoxPtkfojZEzaaAkumJzxpSlgJc': 
0.0001666944490748458, 'HNMDFWfnjVJkjzDfeKqBplaKFRdrYZvngfHsGNTyFlRlZNiKfAsBBgwpcUhczhEYFYOKvSCWnzjmOpiXdIKhBY': 0.0001666944490748458, 'pHswGuWxgCAnIINwUbkoRqIOCrMoXzAnEOJmlClkaPvKZbLWEUZORAXYHcbzeSRklWRbFJLNVcSImiFuyJARxuvdBDOKYxcUdkMOpqHlioJrg': 0.0001666944490748458, 'ZMYvsjxpFnLNUFfWvAQBhhbWZGCgCeHdYhuUoYZyJvSdqbwdLFbpqVsZhqSuzLnKwyosEekGaofeWyTTGTddajUdePIecexXKAAFyqEeaXBlkIOTQvnST': 0.0001666944490748458, 'aOY': 0.0001666944490748458, 'FxAEKBIlaESdYgTWid': 0.0001666944490748458, 'sLyIPhtuoUT': 0.0001666944490748458, 'iTHgPNdOPgDiyyTosqxWUoZSJufiTCiziNmryfMCcHMDXrhidFBFJFFhNyKdycYDZiUisqT': 0.0001666944490748458, 'GuMislyfBqvCvi': 0.0001666944490748458, 'cTAxOWJBNHXNvvHOFjhKAcdSjJzazsXqHagwVHkFhwIsieQKIN': 0.0001666944490748458, 'CIVJDrSJyvrypyhaIimQAuGAJNqRUjulYKEyHScuZufEYPJQfHzwTcbIUxfERLdxPJwyvZBvyAgyMvaVfHBqzWThueqKxgvvOCSLxqAQiyxqCYxuGpphie': 0.0001666944490748458, 'XCGjjRYpsGIjSzrREpvXJQShouJFVidzBKbYKGPLoFpmlHPjvyVfkXUEtODnneKWiXmTPWxTypyQG': 0.0001666944490748458, 'qAZSSiJxeDYcjtYwVOknCBYdPSKELyNFxmoZeFULoBYOZHqxJtLqecfEtcsGR': 0.0001666944490748458, 'RtLLATSUUGOdVwlFPhCINRMpTTreEIPWjuXcKChKtBJIQbhpwhwNdiqfHORWLkfJDjPlsT': 0.0001666944490748458, 'FFHgBgoMx': 0.0001666944490748458, 'JCJSOuOvCbqMDEFwrnAAZmhyfwSLITFvdzwcNJhOTmCUrVqRHwgqDkWxwybKcAjxgjztCTHoNJFVnkFbvIx': 0.0001666944490748458, 'HuidBOKDRtrajKcZbCOKtQdKPIveDaoKxGYHrTTRTbTRWIqRDeZbYxSxRO': 0.0001666944490748458, 'YErkNPZTSGXARlzGvpnzrDDshEBPFwANYaCapXqTtakWJNwgZQyYIjjvLVSXLZkLQbrilQcbfUlFGpTUYbXElsikxdrfBhmbjimDOOvtqEuDzhErntXkEfo': 0.0001666944490748458, 'cIpLiHRTArANbWWLFyYNXcDeTcn': 0.0001666944490748458, 'ea': 0.0001666944490748458, 'acoAxcVJmirkVJMfFJhIEcUbSnWaJbaUEgRsPnQSQWxBLuVva': 0.0001666944490748458, 'eLoitqmSPmuHOLUqgGxkcwXAIBULKLAwjmDrmyRomPAYEVlcfhqWlZQZgfqhgtXeYinmeEftNeXWmJtljYlvaFhLD': 0.0001666944490748458, 'FXjryefkaa': 0.0001666944490748458, 'ZNvzGXDxrnpuHuenTGQcPzCdtKAAOuTXocjhFIvCGkQdxZmVLyzFnPVZXVqYGTQTeQb': 0.0001666944490748458, 'aUgITDgBPYkjgAsFtKzDfeb': 0.0001666944490748458, 'ruiYYvRwSqurjXofFMUeRnEHphSCUaFPIRvWfEyHjiUzUzhSrYGkTpmsezVETtihOnhRpQhpPVSErQnwOIvqWgsnMQuTvQyamxLZKZJCuyAmPrqSeypkqeg': 0.0001666944490748458, 'jdXDPfCbuYoihETnuRxQefmQvrUkoTSfmgSHywkkLaCXSjrLxQDYaOvydfhwtclXiFBlMhhruZesdoojcXH': 0.0001666944490748458, 'ciuZpICrtwwNGkEvnFEyESkJB': 0.0001666944490748458, 'xtvtuVQeqxsnDhoRzDdPfboJPWWHNyskeeJmbbYDnTgOkOkwWKXCrKcaTwknPBljKXwCmkURUIf': 0.0001666944490748458, 'doxEhjNKSqiFManNAaLhkjEhuABBgMZytASCiIQAoPRyseNWHoisj': 0.0001666944490748458, 'xTiJWmPSwCSJXDkgqaMzXWPJorsZuULJfdTYlCkXSzYpDBhmvAuOTGUcvsMiuRbzlIvYynPCbVbfvxhVelYgAqHObmSfJQBKM': 0.0001666944490748458, 'fQYpqtvdJZCITMKAWBfbSaAsOhsiCKocNFaKFyQDUGXLIoiyXnvgApE': 0.0001666944490748458, 'IwCftyepTsAtqleiZkIvcDPNIbjQQgxGkaawAOPCoHuGPWrbYmURKbDjYUkioktEnqsuqNszivAQOnaVLaxBWcIaPKrosIBQcyrwkUALpvAKDUwNLv': 0.0001666944490748458, 'McuHiRWigCtsRXjuMlHAlxyPtjQwpwkTePsnbolRhSXZEsEtfhcOFRn': 0.0001666944490748458, 'RrTOOYFVmGRkkeYLyggjCOKmOEqGSoBWOQcMxmHQfHBXbCfPtaCURvp': 0.0001666944490748458, 'uSjkBlHINYAtdxqomIF': 0.0001666944490748458, 'CTKzCBuzlrzZfVzfMKWRqCfSIHbiILOzNcQNSUocRlqanKgxADngIEquHCtyCfpkdSwDVtDiWLzLQeROmpUEqWrlLGiAkbYpdZu': 0.0001666944490748458, 'mxoCjDNkuPHcqAuSkPVNfoJqjw': 0.0001666944490748458, 'YOGLbDBtJsnVXbe': 0.0001666944490748458, 'dqmIOALAZTXajapLuKGFlqmBKQZdBLtnqPOMFyHjcaPTkDCrLEvZjOnontaZBzHONljDQNXkKsPVdgxHdhytDopVIX': 0.0001666944490748458, 'qzbkDNDGXYjrQQukCqGzxIibEcICGCCuoVgYcGthKIqIAiUAowhFtttIkcMDki': 0.0001666944490748458, 
'JRingIZwLGhTvpIQvTFQGJxpVeqDCNAGOlWjOdiBLjiICghcmDfKnswtcUcRFtcRhTYjswiZDdVCbNgqVIjSlPEfDsXRclvWhKqbOqC': 0.0001666944490748458, 'xvKvghUzxtsjmsqjNCWkRzSeNNKFxlvbBiaszBAksnquIvYNGOwGDsOtK': 0.0001666944490748458, 'TKijir': 0.0001666944490748458, 'AjBrBnIKzs': 0.0001666944490748458, 'dEfaYgDtGOqLzXifTWHQmlHQqkgTKPVqDVWLPzpbYbJCJFDMfCdCPGaBhmHebQSAcFnJhG': 0.0001666944490748458, 'nSaOyzxqKVwVsEvtVYHZWAACUKXWwvBFmGUsN': 0.0001666944490748458, 'J': 0.0001666944490748458, 'ZavDrlCvwRdUzWBzwzxmUaOTxpcPNPRLcAuzeGbuWRPJWvIhSxIhoclxDkIPlwKiENseKBjfLfGKoAfXLIEtsNeIdHOpdGBUBgdNKnC': 0.0001666944490748458, 'KR': 0.0001666944490748458, 'gWFpQBqAXFwVLOmPXlFohxakcqGOxsvzeswrzUJVbv': 0.0001666944490748458, 'twWqMaDEhAMPWJGIeAyiZSkBvUqVcSdWDvznFGgJaejKxdDkFxrODGXsJjAcPuMTKzFTHXUNECwuQVeMZuJzOaflrLkOlDX': 0.0001666944490748458, 'iLkyhbKIcDTHCwVIryHLUXpXxwIEtVjfzOdSknKBwERIawHZzChEZvMYFVqeVWvsxMFwLwinsZCTHeiEWvQLLCFQSVTSM': 0.0001666944490748458, 'QwAFhAUIyyuUeSoOkBkOAPvnvwqCKbLzgTrMYTTSKKpRtxjoPzRpZhsxTlpiVzNEKUyaSPJBoldnbSyodAEDmtnLxiJELvHiFqyprqaZVOABivvLlJBkqd': 0.0001666944490748458, 'gcoJYJIJyDswVnvGWVNyemozXANTGsbCTtuYZFocwXpkHlfDdF': 0.0001666944490748458, 'HSUtNEMleHcKsuwNDdKNgfBnYMrUEXtOkGMKfjZOGPcxkXMUxuqvaEkiJIuwBxgoWthZYdVIiziKLXHihAvYtE': 0.0001666944490748458, 'emPjukrqBCIKcxjkNlvBcwQsMlPETO': 0.0001666944490748458, 'CcEguUusZdlVOLyUqIEdSFgaDVuzrrNIUmOdJZznyAblGWhcoSwWbrHJroZSwCSFqUdjYiL': 0.0001666944490748458, 'dRmioBxbLIXKwCkPAolEPaGeKbIereamSPxerLXMJvWaFkBgLFWLnnZLSpKJGcZzxhRxDaKLeeGIGbdIlEVhRLIOQZoEDGJUxYRrq': 0.0001666944490748458, 'ddhzynZEyhQttClztPNEwzsJSnQgUjZQLnRgcCgalTCwOoWsroDAcZiKkgisXkuYyqazbbZGPDVkKAU': 0.0001666944490748458, 'cBCTiqeWwWGdGFiovcdvvtPXbgQdkufAiYDDsVDnYaOpqigOCrShBNhPhBAzhPOrfrRtMZY': 0.0001666944490748458, 'UeVaSYbqIaOsgWejymIpIIVkRdjxlmoXcxHLJITnvAoVSVGACBfJNFy': 0.0001666944490748458, 'fXWqMoCIPQMJObYLi': 0.0001666944490748458, 'vxLiJoQgZsqdzyqaiwMufqgWiJIyEPWsLjAsobRNWPdrrxLoHumKBuItIXzsyyllokbNIFUkIlwSprZBsdzxsiPWkHeDwajgg': 0.0001666944490748458, 'RzDkGYpujqWIoCBqgwboBzpFkUFLIuErWfvbuZJSPSzNAIyRIOvCRWASgGwehH': 0.0001666944490748458, 'dSgZEsxoMszqJkuAYzxidrwAGwpPoJugFvweKECDUjGVPLwEbddCAHxNXfNWhDGbesHLxRqJScxJVjkGxYe': 0.0001666944490748458, 'rMEGpksVpXyukxqCZXTdwxFBAvXvvJAzvFyJZFCUvFFfRXeXTUkOqtRfEMHQYV': 0.0001666944490748458, 'qdnwviwBrGECzHyXazTYaLBXkIZYe': 0.0001666944490748458, 'ZkMiQwZFqQGniJAmlMcTDxFbTnXfYvzyHcoaOAaQtkdGZlVlchObTDdtLUOGbAhHOBTfn': 0.0001666944490748458, 'TizZUbujZMPPimfIvMYBwQcIefhCpqcTCTcDPDVRfhKuMpstrPACoHJJAOKrrXOrVrnfngwJChZbndiIQbargawpOYaLdlbRSyAMytfXG': 0.0001666944490748458, 'DpWvuaCAJGfyFiwsk': 0.0001666944490748458, 'nuIfihCULXPJmwXsrNyKNnHHddchhsFCUKHPnMtwLbwhBkvnPqmeBsIKzqbRAktJkNVRTgojvJBzLLSVWHeYRZrTMMqrzgzokBfSRfwfzDFXBjD': 0.0001666944490748458, 'yDeNaImbWSAxfEpIreFGzgDBhnURImHDDFJWwFSjdvtwloomQkQhOnKyBGoRzCZtkuGlEIhDmXFSKIydWTxjr': 0.0001666944490748458, 'FlGMENmISDirewcTxcieCzGyNWUnIedUzMNGpmgaviVzmJjzQKQzaGVbhZXes': 0.0001666944490748458, 'ftGMmNNFgLBJnRzCxZhCgLzVDANdowcwWQ': 0.0001666944490748458, 'GwiWaCCDwrDldPBYJvsuqxZsUHffxbpFCTLIeqpeoMgAvPjKbMfFsLZkfsdzHoVxOPgbGcVxWjSSvYYFyEFKnV': 0.0001666944490748458, 'UtmTUYGQoYWwWSyRrcDYisxXwpjBUQkIl': 0.0001666944490748458, 'QjYeEAAIGHTwx': 0.0001666944490748458, 'OTRGeRXgPoJuirVxNZdSFqaIcOsoFRamRUzBdnskOSfAAIgqbZnDWolAHOIMsRpfmfhEtxCKcFoubr': 0.0001666944490748458, 'DJFQHUwzzNRXUTOMTHtSdzemkCejzEwbqYtnlhjJoDynkPdrXfnBPZYkBaAWqzSrluEtiwhOWfJQOudJgQSSUmUjFxKORvrEWLAdosTLEqWeWaO': 0.0001666944490748458, 
'LanxNCaLJqqcHxCoUKCskYCElzMXQNdpdDDHOgrIugsoRZVUsLWIdOYyHbcvHYusMEmrjOVuAVqsfOiOhNwzib': 0.0001666944490748458, 'ysnGuKvTjPIWlRkZxnpmbwirfoQqEOkAyHMiIbddtLUjaoKWSxyvQmHWIvOTtHuITtGqnHn': 0.0001666944490748458, 'iXXwzHsheiwqddbjvLzcDtqoQONQuutmSxDQDyKTxgvHzXlKGSmySvtQxssQKJDgkvzksjrFSMpXSYkiqgfJZPLFt': 0.0001666944490748458, 'mQLJgFKVZxeIBGNiwwRTKwXHfNpFziKfNXaHwGVQgIiTsDjgaHTZHPswwZbxRGMSBSsKiGYAcQMZEIpJYztUfWoxmJiplpHSEixlAaiIeJzTsSTiwBOhuK': 0.0001666944490748458, 'iTJBEMAZygFwmtpkhOauEyZoTkMuhiQHBTAkuQXMJsQcJyKDpXpcRdYlERZygthVLElbxwfWKjBgIuWJJtjbxNFLhKvmigJ': 0.0001666944490748458, 'ZkECuuGhCTPbILDWdLiCRRQBAmZZfnSbpJlEetukuMVGuAHkFMjklJsezVTIps': 0.0001666944490748458, 'qKNkIPlPKqjYZWfUwScnQgqauwBkXAqKLaYLDMjgiEyoyXAEuvhILvMqIbShjahnXFeermVzZnqpweyBKzCSUpXKAlcnpELrkIwIWBWCUb': 0.0001666944490748458, 'hHyqaLoBGWFtEuXFcpLKOozSbeBLTivzbdmBSAvOaiNfnKyODMrlYcLlEqSDQigvHhoUnVSvyXKJWfdgE': 0.0001666944490748458, 'dsfKvrNqogoGtsXNWpylRicbAyg': 0.0001666944490748458, 'BbTwPAtZjiqjGgdbZAlfKYmgnsipwjIewvOadqIvpydUhdOCCpcmszHbFkALuVkGmIOJhB': 0.0001666944490748458, 'nnyRXyCExbQMzTrphZtzBuOmesozVWbTANWxmLiXzVXQUYTCWhtkdBvtZiopkqIwdYsgpBvuSvisgjAqWTsvQkyaE': 0.0001666944490748458, 'tDcDDNedyCQumBEWkigQbMdZAfytSK': 0.0001666944490748458, 'yHtjKffkQqbxZSEWIaUPLzJeHJgJlkWGkkKnwKJGRfsAyJTwFIohsYTlQOarDTNVraKzCzjNbRlNxxOUnPuZBXEQNnkwKXJQBlIWqkEOGXFcAnveXRZHhmi': 0.0001666944490748458, 'MItYKfpXGLafERquUtOlrbruMOIaRRCUxykAJWmIFzzMNbiLzBbuskYIUNVrGaVvs': 0.0001666944490748458, 'RNXYMGgjjjanEQEOOUMHTfEyReAwVTbIyGsmYIvdZDfmhbNxzhFHluJEHrxhoujHuAUWwelSVljvuYjGU': 0.0001666944490748458, 'kiMuqlkVnIgDGdtsCdlXnSDvCiwhdAzdTLythmNZnBijdLJAehTPUjNseIqRrcymwgPleaLhGqsQlLjFxnXPlHapJaRYbBwfYeGxQWSFjhKDS': 0.0001666944490748458, 'mhbJHnLQaytlazqMHgxddPrcJQGqSEyErVVoPXrRNBPkGHhQORIjkcItCB': 0.0001666944490748458, 'cxRyooUcKABnHZeOhkgiTbuwDDMrQMGYLMxmdjQIHTTSfiBNdaMeMGzyjJGuRvhxLpygeGKCDNHUMRRceujqsqiPm': 0.0001666944490748458, 'RkRCWlMcPZPdiEKfWRhATLzYsqoPwq': 0.0001666944490748458, 'eMvwUGTIMFtyqWLEKYJaGSkbKGThAZhXWhLbSNmhXonxkOQUkbJCiziwNAeHSK': 0.0001666944490748458, 'LBhsZuXcFFBlZRinktfmrwBxbZwsOpKDJmZfTjicFSHRdAW': 0.0001666944490748458, 'XoAbFKRldjDDbrDGgPhfgvmmyBoVwYkSatNDUXjGGfgyxymrcQdUlPPPUxVQIvDkQqTxoPCXjSalN': 0.0001666944490748458, 'hPIYGNMeYRgxdhQcrzNLBJibMUzaja': 0.0001666944490748458, 'tuYgpbKRdyUYbCxsbAsvnaDPCXNygTVsbHdesVpiPWpUSUhiByHojeMOAOhwecVGPkrqUBRKFYLuRiZhBzjjQzUzSNJShickLWfAVQwxOuPjaHEWetMHBj': 0.0001666944490748458, 'vBNvmJudxTSqvIefQbMxWpQgMjpanZuFOzDdOCbXYwoOURvysLpNwRaimqsnPtiJz': 0.0001666944490748458, 'GNuSkVhZBwePjXknUinWjmswxduBlhzysMkqEplvCQTyXEKNsTYAlXvtwlkXkvSplXYrflwDSvouyWHsuKxjaUjDJc': 0.0001666944490748458, 'zUzXPOdbFGaUSNJiYSPUUyMJBNfNpFroHKBnmexAFdiECPwNoconEM': 0.0001666944490748458, 'mBRAAzpbPqEdcJBoXkvqsaAGkUstfwmKjTy': 0.0001666944490748458, 'lCoTqqudDcKxcBjGCJKjyNDdekYpXZIAnMrDCcrIgCrfGydXTiSEkwSRmXCtbglAnQTJZ': 0.0001666944490748458, 'idIJeYtXySOTImLaEJMxSTzgqHHeeYQEBW': 0.0001666944490748458, 'PrNZpxjBjiPZviZhNKeTlvgzPpgnSwJxMOJnPqRCWIICyaiaWhoxoyJpF': 0.0001666944490748458, 'EzWQelijavgBxtmuTiDWCMpKgxzmorpuvVQsnAEVnsmblPaBfbwGzXyKzjltzTPFtQEeVevhZMfxKHbYhXMxTQieYoBfqSH': 0.0001666944490748458, 'saeWeBPOIUJDZXSsEjPobUKPgNqbRtWQdGbBVpqKqciGeqELMPA': 0.0001666944490748458, 'AXyFGJpMgazyLLlominPXakWiAbVLQxFYENmCLSrauGBDEQjoaRubwytJqbmNtWFXIDKcbjCWrdkONjgInZHeMaCqaWXk': 0.0001666944490748458, 'wvQghC': 0.0001666944490748458, 'NQsYjPNFpHfoCgVHmiJfnRHuDDUoNFmaRhwHhaMJKgTYNItAdaaeVNukAsSmNwQqXTJptqHKDaLBDYayYgmCJRGxAof': 0.0001666944490748458, 
'zsNmljfzcSOLoyFhZaTrfdQyfKhYeplgtssCmzlBYPbFYdMeXxRzIRTXaUWsfABWlobZZpwPreWQIpVWUhPWKBHun': 0.0001666944490748458, 'mwDYctioEzmmJorcsbFPabmStnpSYbSMpfgTOXacGaqGuAjxtSRPNYJiaqeetTAHOYHCKqJYmdMcPzmNWOHnihYQqyrlOwF': 0.0001666944490748458, 'NNqjVY': 0.0001666944490748458, 'qtwXVOWbVAPfTJmTyGezyiBVkfOxoNdMLXMgMoxyWsbPbNEwWPwbJyDeUUrUxcNsVOTkduabPjnbNimxOjFguagrLFjUZnImy': 0.0001666944490748458, 'BKBklKvhmsdmFNdllvCglhxhXfZojytgg': 0.0001666944490748458, 'VJykXP': 0.0001666944490748458, 'zWSZBnAfL': 0.0001666944490748458, 'KgcAjtLKlefeSvLsLKGbTbNDutKKBKJkQprslfQyURYXTfahOhUBXlaYWwwBeXOfLibetKQrVTSzXUemeDAgRQRUV': 0.0001666944490748458, 'oehpiQlsgEcyGKNkHWWvEEKOtQHHDKmQYCkiQZXfFCpSPEQjLzxbsnyzoNDkcMJfCrjMf': 0.0001666944490748458, 'vXRcfnwLFOPnRjUHRnIcrYoctZuOEwNIdJISlicmJfVObNQJVz': 0.0001666944490748458, 'gEkdAwCPpZJuTnLdCyZXXFNoMwiNXpTUbsTjrwLvgrDRhENEJIlFXcHAoJBdipNMkcJCeL': 0.0001666944490748458, 'vLlTAPzmHOWCcCtecSNJCUIzzIsNqOsaYRsUKNtyDqWlmgbCWZRGczSfCDDwqKkLvriVdSBqYZYsXwfXQtnyrAQNomSosSEuhxmbugPcfCblIPvnWofXvP': 0.0001666944490748458, 'qzgdyyGJfRrHoGmRPxtWmGdssjwbcYUZQKzzBAwIjliuPrJDfQRNjxmaihyvmmAXasFlQEKtRbuwEPBedcWxbcAaEKmizILmSXdaykDPoDfVDCjWLNicO': 0.0001666944490748458, 'KLTfUbfYhLVHkPHRjiuCOzRobIFPgsfjwutDBWvsr': 0.0001666944490748458, 'ZZfJiDHbXGyIrYU': 0.0001666944490748458, 'OVXybILVnJpwlFaeyvtFIouBlkgzlRynDnoSOVnOCKYmPlWTKtzvDJadhbhNwvWoPmEwGWEIPdEKh': 0.0001666944490748458, 'ohUMgnVWBZnrtSSntfLhWaKuophzoMcAmcxlpZVPvXiDuvQZetNOrZbxeutgEvNRKxGohVMdJthNDksTmEJlicwJjgSWNgtlKyRkqbdFVAMXMRYpmGj': 0.0001666944490748458, 'ETBlImnfMqIaTOMQfBLXeJAUczSqZewqzferlEUNrlNGavzgILTdiSWCwHIvEJejcimZlvakxBtBxhvbRdQzVsoFoFWSunPGXHkJk': 0.0001666944490748458, 'cIrvTtfjdKHeKZaGvoQZsJTcCdFnmKFmMSUHGvVoHaCEbixeDFoPamAVdFKkiujVjBvIdMhmm': 0.0001666944490748458, 'mOJqTupgoiEtpxDPPPcLpGhPLziidRIbtziGefnDungzQdyvhJsZtXFRqUwtNphsngOGJrYoiFWrqwBbJiwRkzVQXkoEXenZprDSeqbJC': 0.0001666944490748458, 'KKjpUGlfDzFEZLSjKMeJYOPJCnPJENJnGdFdFEtyZnoZltXLawizglppDq': 0.0001666944490748458, 'JtsRxLTwcXCRCtFYTZjrqBgCNftUQuDjsdUizvirGKhISdbTHrMJGguDboOKPRTKlP': 0.0001666944490748458, 'JtbMYdnCbkSZLJiKyupVvvjkIL': 0.0001666944490748458, 'osKxUQuKFWMFLwRpHLeqczIKkcCmOyJbMvtbDHKtOkUmYRK': 0.0001666944490748458, 'iqmamCdwjBHiWzVmYCTJayqYuNivmTyMCDbsObpURWhiqdLzfnauZlilXVNzjYQowhGqWFYtvpT': 0.0001666944490748458, 'xRETpXyqZgTLSpGofgXpXCKXxPX': 0.0001666944490748458, 'LYsYfuVUbRnXpGV': 0.0001666944490748458, 'icneNzgpWutdX': 0.0001666944490748458, 'ipUlTyQqZFUcDUEohPXRIvboKXT': 0.0001666944490748458, 'kbwGyZoFRchNaCOSwjYHSKA': 0.0001666944490748458, 'xVWaRZKtSDjKrTPVObXjLZaIhTzJltGqxumpaVNDsBAbVYcuhjgQjzUMSbynCFxTavZsVMNmsRoWKWYoiXjrpGeFBoLiioQHAPxZJIzbHRnlvrYRL': 0.0001666944490748458, 'mGBuAdfjgBtTjHECzWbQAXZxxRKyUppevzqzITYYcgJzOBSsFntyhVIaKKWYBIToFIwNVDDwuYaIfZurfIMuSQctIKRgROXhvDnaeWzitzImlVoUGRCxE': 0.0001666944490748458, 'yBJFGPqSfsntCZCnhzJWmippznIoCC': 0.0001666944490748458, 'ccaCsdNMwKDuSkNRqznrTbFgQjYAAPGFsUAuuAvPFzPYEARHujAFsu': 0.0001666944490748458, 'aKb': 0.0001666944490748458, 'IStlerNFVjrpsxndAcMbiSvuicWeUXgqEQBxIFNMFiDgco': 0.0001666944490748458, 'mDfWJf': 0.0001666944490748458, 'qeYNOWLzrUIuaCdxyajmcqjCROHmFUoqSAlrqWvtIWVPiljLNcTRCXmcFEzPyplTMIEzuAJCdcgRcrVODFdLWJmIgUVBFSnXiVlChO': 0.0001666944490748458, 'YMNUoLQEgqEnmWfCDqeOmRyqnBCuwxUNDmNnOcpHb': 0.0001666944490748458, 'kKWndUUxMHplXyldjcJrfSiveAcPJifcTppbplHLZkWZwfrQHXPiAEyNeL': 0.0001666944490748458, 'tyouZVBtup': 0.0001666944490748458, 'YJybQPbXcnaAegiyrVlFM': 0.0001666944490748458, 'mdNSIlAFrhZ': 0.0001666944490748458, 
'xlPhlGfJHgZmSbRiXFXaDIreAOGVSeWyTlJDnlHWDusMrkFzQFJvwnxugBAWnpPTCPqnNblxVG': 0.0001666944490748458, 'aRPkaKAyLEdpaxpABEhMKMHOVPhMfMyEfugjnmkOksKkrMwgTWvAXCMSzYmOHVUeeAHTorhCuX': 0.0001666944490748458, 'XfqGcOmmIVhaQjziNcTSIaHHnSqWcuiMDuCPuNxgKTzniMuLcpuPbdfrsGNSC': 0.0001666944490748458, 'ZdSAwQBaZwSfRijCtIcpolpcAFDYhHpAcaATTXISZPbgbUeWCqLeSOYCUiiUqqFmhjOImkCLQaqxXamYLWaWwMKeB': 0.0001666944490748458, 'KbytKvUcMnRtjYdYDilDPJXSLpzMBgHcSROCetICeydFHLSoTupjWMNlCOXVV': 0.0001666944490748458, 'DwPbswfdgulMcFGUxzFXnbsgOsNrgDKoeJKxXcWTihtfaWyASRWBUOlGDUrhLJhOQwOamvrDmQhdhWqZpMjVQNRTYtAEmwF': 0.0001666944490748458, 'QirTrWuxKceDnKpQZcTQUTYtBTErRMWLqOPeUKBrkQtjsaatNaSBhALuLTGtGteWOInKPIBYi': 0.0001666944490748458, 'CYiwcaDowgMCczqyzKDiSYQsc': 0.0001666944490748458, 'wzxrIFxHHrAbySUARMcSciDAMsnjzXtHkXIVCpOeWUzdXGtgOjStZbhIhQuUhddZuEcwHKNPKAAeLfDIJLKzQf': 0.0001666944490748458, 'WABYTLjiZkLaQRYNKobjxqAOvIjXzCSIKiIbSOFeVDXCXdqfZIYsgVuuhmVvgDMEPcdSPfXuER': 0.0001666944490748458, 'HxhCVaU': 0.0001666944490748458, 'mosoZrgQJHNpHulMmJIhRuwBSEcpvZhQhhwncucupGrbEhokGNIMHMORyBAPszQlyIeBxEyJsBMNSBGVpphvvzRgxTGfqzNsWzkpACd': 0.0001666944490748458, 'UMoGotvdzDZbHluBtsdJGLukvdOasnxcRAcPzRbDnKXQQWDGgjeriSrtGeVSOhmQmzErqTExKptUmPDSpiwigCsqtDuQWAb': 0.0001666944490748458, 'sIuDKdrPYkzbUIZWXdAbaxmTeRFDfosNsUCRoP': 0.0001666944490748458, 'TgYsuGTIwWdQtMDKEeSFKjEyWNEwCtchvjYyoVxhyzgFiiXLVIyJlbcZndk': 0.0001666944490748458, 'jULCRUBFGmjpGDCdVvQZFQATSQhOHKcFCRuRGZqYRHwcMFlNkWpoDVphtTaLmp': 0.0001666944490748458, 'BTtQhlYwjATLlgLkkHbQexgDYfEqWRJAMLEWJ': 0.0001666944490748458, 'DXLuEvOdDENAIIZJppOvttEwWJWeQPddxZnsThDdpnacWZJAjXjSoZskLRUrxtttTRuCdsJfsoqEULKLMgOzMLPIxVmDCLdavKFhfLWzLvkhkhQHO': 0.0001666944490748458, 'Qz': 0.0001666944490748458, 'oUsvsXOjqMOyukOhdevANlYlYPaIFzMTzPiWAZQViZRnOiIgPGFvHOFnjb': 0.0001666944490748458, 'hfiyqzHKaeEdGxmvxKbWbXemKLuKgddOlBbvZyFTEoKAVGOyJRKIQPGQJadaPyTPhKU': 0.0001666944490748458, 'XbqxOumMNQQYtfsdYFSjNfDTxMLsCzAJLVlwuGUyPwpuBO': 0.0001666944490748458, 'cnOMKCxMiljhCUCqoycWrBeOStaPSTRCIpLtUODXzhMyHIJNWNzRTPeybjbAFclYqZcjLBwgnYraEv': 0.0001666944490748458, 'EQLNlsDPmoZNQeoczniXbWLuzgIxnZhLhFkhNmWmCdkhXlSmnwuojaeiFyGLBraoilV': 0.0001666944490748458, 'JLyKBRS': 0.0001666944490748458, 'TgLPdbRGlUsxf': 0.0001666944490748458, 'bTKOEIxpgxzJpTJYGrqGEytPztadFusoOfDCreVRCjuvZWbsvJSeaYIffqSodbwRdiAlfaAhThoCw': 0.0001666944490748458, 'walRWxmZezaSDVmHzLtzVqbYO': 0.0001666944490748458, 'GZwbcbrFXVPMsXiTWiSaIJaWJOsYAGdfmzXbTWBZNICKVohapzqoybqobzcvRxtibCRNDUukBzkPplOEromiDkHuiRGmoDbTaUjgpKsgweUvKO': 0.0001666944490748458, 'ZnaLgyKUKFPNqyGRaQlMnr': 0.0001666944490748458, 'IPhXlLaHGhCrY': 0.0001666944490748458, 'nKUdgqPjCwqpbskQDhXFGBqWkk': 0.0001666944490748458, 'RMFuYGCVOnMPmspisRTnnZQRddSvdEIZZhhjuIhmAqveWtXLJTyqqWOriWYfwsTiz': 0.0001666944490748458, 'UvxqDbdJOsJXnbkoWXzBXLHBqMULPXHPnxVgDhGvMwOuYmkxSaGnJwthCUVwzbeOMfwWbMszkabADdsYcHaXOMkGZitNfDLpMag': 0.0001666944490748458, 'UoZgGwgwnOisryGmRVBAxPLugCervdcDFbasgomiXzwwxGWZDwhMVRUEeJEemsKZhlIpbMJiCCqIMnYiVZEqx': 0.0001666944490748458, 'WJzjqcPtjlOtEDoJnsbVF': 0.0001666944490748458, 'WkXBnvvjukvFkHRKZjRMBXasJUwNTIEpTFDMvytWzHsBlIfyxbicedbeeIzHXwBSneZBcLVSpEcsDIyiQbnUiHeHwcbXCBatAIbUaHWjsn': 0.0001666944490748458, 'KNvFZPFPwEduPXKcPoCblaGLqqJbvQpDbBpneILXENZdFICBW': 0.0001666944490748458, 'oCgCPPPuIYbNnRnXlGwZPfuPrvaAPQKCLDTipzyRSQGxKqIcOhWFulm': 0.0001666944490748458, 'CFBAILaQtXHMp': 0.0001666944490748458, 'ueJLMvBtJh': 0.0001666944490748458, 'wDrNbWCDqcjtUZUZLavIAlaabDFdqofgtaNrPrXDMbchJrqDRvahbDnmpsyePluxiWUBAxxRbflSNJ': 0.0001666944490748458, 
'NLmtZqQYgurVFtmIiOAWenUXtzmUcqYoOchMTDlHkcgZUnQMrBMVjiQgMNZgtXopmdgHCmqBdnktRDM': 0.0001666944490748458, 'TjgyWacOANojMzGIjislhJIbxKJVuLCHNppVqcwKeztqHBZfQlhubBvKlmRmuDDxvK': 0.0001666944490748458, 'HipfvkeYfItwCfYjDkMfeGneNEgglyAmHHWfbvQcHVlkdBkpxFuSbVVSBPlxkt': 0.0001666944490748458, 'HMpDvOjHSMFbPkNGetSPsKwzocnfIihruNyhBSLyysPpHOnIWNhEnClxIkFBOHSTZJSVQ': 0.0001666944490748458, 'lFPivDFhohdjcukaOnaPuefQVcXxeciVBDcyBllIYS': 0.0001666944490748458, 'YYdDsVDWedvNCXlDogaIymMjlLjQidKJWrhwVvuBPZuSsUeqDXXEdRxBDwvJXhrnAdpICHJRpkUICPANJNUXRJYdbCreUJLxSysAwmmFcWaVUPHuUHjJR': 0.0001666944490748458, 'DvaKAiiIuXQYMAXILHFksKeqLQdJZuaLCScdQjCmQnaVgmywgKyZzgrcsXxOHgwTuUUwokZglCXoptWUhZWYWNzauLacJEwAAnIohslAVaAFtwkgiI': 0.0001666944490748458, 'iMzmFBGqhsrvvesZlsSzobCwnqmGtNtJoUHobbCwxlJBQXdcfDrvmyJrvkymydgrpPS': 0.0001666944490748458, 'L': 0.0001666944490748458, 'WbmHdmTtlJRuXtldKXSCymxdvbSjDWLaqngQKsJHYiVJQrXqpRDzhIOkykNSdk': 0.0001666944490748458, 'hbDLsQucDiqKrfyXQFzfypMrayeOiLboPERBkCGMaIBOjWPYBptSfsiMqTrsgYIarGFOokddiMsZTSpNcNlmLWxNkc': 0.0001666944490748458, 'fFEganwCcMJnCUy': 0.0001666944490748458, 'UESflHmiJLLOQFLSqzgprBuXDDUoeWgJTlVRbwiBpErYKHBFvlxqvXUngVPvMfufvPjrsAfobDFUIEsmWJpdzfhAtZbAbbHZhtqjfOuTSRK': 0.0001666944490748458, 'prPrWRCoeYL': 0.0001666944490748458, 'rqIuMzHhIcSEDYidYAFSNbzHVOswQtCSnbLGNQjYLrRZidJvZatuegXYmVXQIYDeUBizBTOEjuuSfBhWwslZXaevPMZLiQgGlwyruAuflNqgxNQLz': 0.0001666944490748458, 'bZfljNqdqTETWehYYKhhHsIdDpkrcioUVEZqVwFKDHoYDpwfTDSmnsDGpFctUnmWKXJJmfPXuBlPAxzDmGy': 0.0001666944490748458, 'ShiRLDJRdMBqCuSjfZZCrtmNyAcAxyzCItFRNmvPoglLGLSqswNZejuYzpyCSeONUUNvTCNtLDWxkpzekcP': 0.0001666944490748458, 'azTZkOihhdmZkGelwkmjyRBarMzNslJLKdGPedrATszOXByDePyZrwIrCDOLOGeKaRmVTIBKOgYbws': 0.0001666944490748458, 'vzxvZmfzXKlhgTBYMqNgWjSyPNLPOFpfqezYdstyARVWcMgXbynphdUnOb': 0.0001666944490748458, 'zcVqbVKynS': 0.0001666944490748458, 'KBBiOMcQeYtdhFUhNCRmeK': 0.0001666944490748458, 'nyjCdeILiUvAapDsFtUfDH': 0.0001666944490748458, 'KbinEsIGysWjnO': 0.0001666944490748458, 'tMEDzmBIpvkaTEoXRZ': 0.0001666944490748458, 'pTEUtsUcNdmzPJkSDMUfoDItAFoQRDRhBfpLLOOIZtWdwrsgnBYQuWNVhtzDbmvvCoDuUIpHYtrFpXAJMzoCSAYmnSTjuMyxKtTNRncfPNnXOaMGVALFsX': 0.0001666944490748458, 'pL': 0.0001666944490748458, 'sAtVDRGRFGAqJCjeyUjsjsrPZyYOvscPPpWVQnXVnUehPyhhnOnzReXWOLGwKZVkFVyuuPbfKQsZQUhiugyZKkTxCOXVPHzrMVjNkWoLmAYcZR': 0.0001666944490748458, 'bjMyaxcvmSNHbJtfqlXzobUmWLOPgbUYeABikygYHFFkSQYbqzfVoUxhYAYdczulhZkAhvOVPqWJERjlbVDDthVPSodsCgKDoRsesoJmrKhIvCq': 0.0001666944490748458, 'EerfFNzceBiCwZZoRUopHQWEbQ': 0.0001666944490748458, 'BKFyuWxewZtRaykwnUnNgUlupfhpproTgTHca': 0.0001666944490748458, 'KNOilNmHbVJouhbJAcukKMDRUtWaWcYdXDnZzRD': 0.0001666944490748458, 'TRmVbsyxsukCsCnEfEqcJlPBqtKTXitydNABKRSYUjMudffmZGNiSNtYbAXSPumPNUiUeCRKhLtFhVVJyZGRdOJAClgASikVYvDAtaddHZkeZtDOpOVPhV': 0.0001666944490748458, 'OiuVXrEyPgOsUXCodVZMZdvAiTKvYoHSfNZMdnXCLESDPmFMDHHZUqyAfXzNjSNhCWNHWdCuBNldfsVWGnxUnJaJD': 0.0001666944490748458, 'bvRUVLcvOAFdyhhjxRYpXTqvpKPAflIlfrJIBwaYrZBWAPHLGeLYLWCuPACBCeHiTypQgNp': 0.0001666944490748458, 'LzmeOMbfPLwdbjJewtqZULGRaVgRBAZDXFnvxHaAhy': 0.0001666944490748458, 'vFMjUAoxYOerAx': 0.0001666944490748458, 'PseBnKahJTUdqLUHQeUBSZXpalYmxe': 0.0001666944490748458, 'DdoRqOmQIJOpVoudIVfBUEbVjZadyZOkPdATboeEVccAJPBhSMbrsDKWqwMuqRxNBToTycUHBjFzwWWPEEnmJ': 0.0001666944490748458, 'unxenAdhbYDCvUuImSFBV': 0.0001666944490748458, 'DFahENPoNTfnmkuBLXdpBNFZzPFgPVHLveGkmPcglDuUkJsQGCNCiKNNHKwRkLUwQmQPIVvTmTaNV': 0.0001666944490748458, 
'upHImeXozbwIHrafaIOwFHQdKQowoZeLRcuAubizAcvnJcmYqCWkzBSHBTIdoRaFZuIGVIFGEKpBXEvIkxSxrGVqyHvwRPZIWJRidjWKcA': 0.0001666944490748458, 'OVIiGtWSmRPgNnRdaRtwgLTFQJEEKFVTpXeGqKQgNvqCgGggyGlrUeEdOIpjeNWzUcZCbuCxCoQKNMHcjSIyFIcrWmTVfV': 0.0001666944490748458, 'YtpWsZFQaXaYynfWAtFWIMGCdrycoGLHtqoyruZGDmSduyoVkzrHnSzROzPeQhTTzUQdqIHFtyJatLenFYrQgQpVlYpDXWqNdmK': 0.0001666944490748458, 'iHFOsxkDjHgyenBBhBYAfqAizTDINXkfiFe': 0.0001666944490748458, 'gQsEnCuvlTrvQxmLkMoJSJeqcBJYHsyNarOrZAAFhUSIWCmTXrDgnzzydPiSSTMAWYhtaupnefYfg': 0.0001666944490748458, 'pHXmDlPzqnkwBmcItNcEPysjYvhqpTcvHCwWrRtJueGwWlDydK': 0.0001666944490748458, 'YOxpuRnvkaQqPxMTL': 0.0001666944490748458, 'TpwDNViAWKLLqgexQiNheHemUcyKmoTtIsNqbbOYvyKvpQILQlyoNwVBHkKdwHtfgNgVAwMDhmUSexYXhAg': 0.0001666944490748458, 'napzNCVsayDvkDgnFtrmOsLObVdLwOWYOsAoxuTRELcBfvkDqHNBemWssr': 0.0001666944490748458, 'OPTTHvqqlOxEExZTGxdfqJDHhFneNdaLUkdeBamsXACcVgXEnkhYShZYbLZnGQPzvQiHHVERmOvmGXFjyECDMsByhOmhRPQFwwsYG': 0.0001666944490748458, 'jwYhuLSTySlILWQcLoOZSdRreUNWwEB': 0.0001666944490748458, 'MuF': 0.0001666944490748458, 'bAiAYdcsZamVavFOafsjMFbkYzHwWaxTjMBTvyYDywLfGUEquhBAtWDfRwFhnLBPsIwGcDpJVdhKEhxclwylgrknjTdPWAIkhqBJgeKhgjTtejHzoy': 0.0001666944490748458, 'GsFcaaMGXmMsNprPTWUldswlcXLssYwrBkSCidFzjfOqUhvGqCpkrpEmjsZoWebwwtXQEixCeZeWeVMecWQCgNEjmDDgRLTfdL': 0.0001666944490748458, 'AoLhkmekzMHKEtqcsAqfyXgCrkRAJRmMlzacygYHhrPQJhnEMdacBvymzdedmO': 0.0001666944490748458, 'nNmSeyzvWRDsOuqJEENZMiaYhsSCskzpnr': 0.0001666944490748458, 'ckwYlMUYjrGagoMasQQZDUugOsbBWwcdYLwVteBhtHcNBLbxCDGUWctILPyjChleGtCRTTnAJVVcSLTxWEzxjMjCUocsSzcNfmLMhqpEjE': 0.0001666944490748458, 'sxHGMLeNfZqDokWgvWGsKJnCzIAiCt': 0.0001666944490748458, 'QxpYlUMBGFJmvqKgBAXCtgVAUsiCPtcmHvr': 0.0001666944490748458, 'TwzRiPWBQ': 0.0001666944490748458, 'orv': 0.0001666944490748458, 'PxzCGIavOfbFsWGTItKKrnAGpovLlImgKLzhGiLTuGCuCqSGZppBuWWrnuRFAZYDypiHKCgkShN': 0.0001666944490748458, 'SAeIEBkSSeqzzMNkckXUzTMyOjDPlDCUrDYThHOhTC': 0.0001666944490748458, 'geHcbwodfgLLIkTrgACVRMfkjisprYKZKcMcMbAFNAaGgzWxvQUfbMO': 0.0001666944490748458, 'FBJzfDfRyQbUjaPDBlgHoEXjLlqSUNybyRNGiqmdbTZhaXonAiOaeBsUlAXSlNXolGccSZRHpMmOqiOjvNCAUCXOUJJWAQFuhpiqsmdMRBYHSNNVaFGUuzd': 0.0001666944490748458, 'RjFfbGBhEYojeZGHliVOZnrgfsDXPodqwqNutgFwtoSNydMClOYCprqGgEoYbTIhzuensc': 0.0001666944490748458, 'RBzphXgfJbDfuHdhRIlKxAnbAJaJfpFEgaAEdedhdPiZghaZpCRVFygDzLlpnKywMTHpHdr': 0.0001666944490748458, 'pRtqUCMDyoYipCmKbBTpqUtvYrEwAAtZlBgarlFTwEnThrrFZMhoTfnQvLxdYYmrtUJJiAERXiAcdih': 0.0001666944490748458, 'rtEJZqltRyweIREvBsDBNWjBjWhSkvksGGdTyZb': 0.0001666944490748458, 'QKQmaEEJxwIYmRRGhFLXjsBzfMUreHpLNTOOBMNWwbQiKECnPusXBiSgCmc': 0.0001666944490748458, 'ESLEbmHgSpM': 0.0001666944490748458, 'Io': 0.0001666944490748458, 'WIoeBrHQSXgmkIdPBHeAgKjfaknGgKqwVvlsmseZjDVwUXvVkDripJnOOCxwoQjWEMhsFoO': 0.0001666944490748458, 'cxFyRHdmcFaRpotqKIqnckAstTMohFqyzKCQWLSmnfnVqqWXSkdyFdPGWxrpESNUIhhVisWitjnDb': 0.0001666944490748458, 'jnxjDAlBDmAcDYEURPfYDqVSOfl': 0.0001666944490748458, 'CPwMaXvggdidgjUjwDkGIcpOgSoyFfAfxsaHtwjhgBvqdWmApKz': 0.0001666944490748458, 'zMuRPLXKEiVAEmmpsUxKyHQVvDfyftiduKSzdvyjDaOwmtDbbBWYkcRvJdpiFXHGpsawFnPKXXc': 0.0001666944490748458, 'mXnVyHctGhfvVUzCMDzcXgFIQehessbKeYYPVSomYagysskgqTzstGsPByRwGlPRIDfIXLSZJLGhfINpzbrExmfiNvSotg': 0.0001666944490748458, 'IyzZUtOHGuzlvIoPnHgVAUjTiDeFmLCWWRzsCRwYmPudfbbELxLrEMPcOBszEEsBDbcveOvJAfthlzzswHqrOHWPjrsmbktAvDYLXatlLsAqFJ': 0.0001666944490748458, 'nzUlJVM': 0.0001666944490748458, 'iflvLVBxZZDNcqljrOBcAhZLwvIUPSDYhpxBIQpvOXLKPSgZNgQkAp': 0.0001666944490748458, 
'BVBhndKtONxFRiMenpPrciINpkzFbqGANeaNDEdKXWOqJjrpWQIrLR': 0.0001666944490748458, 'YAkotjNtGnaZqvgmCEUxhUivDAmDFxCJvBpwtPZADaGErLRTCqcTcDAxgDCYG': 0.0001666944490748458, 'wNXhAMcDckOpKPFLagftEVQDOlgGG': 0.0001666944490748458, 'nM': 0.0001666944490748458, 'VgBMUx': 0.0001666944490748458, 'WJYhgncKXQsfcKRhafnDZgEyWzioRxEppffYOEO': 0.0001666944490748458, 'MZjFxbdTdKKamKfMwHBgUcHGhIiZbsytNtlapmPBQfrrHpJUpReZalpiXDnehODFMoEIUvqmQZYffzDVYIIcoMVebnHWjuDoVBNajYkVrMWkGKYvOKtMZ': 0.0001666944490748458, 'kkLSQuLPDkOPuEaDWSNKezXlDHLrARvgzqIIoGmgRCEgxxbejIdypLyFOLLKOD': 0.0001666944490748458, 'eLMMgjbpOdQwlOv': 0.0001666944490748458, 'JvFlxrnZuUncmHRFbJBlgAj': 0.0001666944490748458, 'OKvhaoZNkVYZRjqYdKWlyiRKhUOqslIbPSSfBiBjEsBFwFYHoIOnvAtnkmuMzdPecgakpbqaJtvUQZZqUzpgAPWdzakEnO': 0.0001666944490748458, 'yarZgeeVKw': 0.0001666944490748458, 'sdcJyubtY': 0.0001666944490748458, 'yElpX': 0.0001666944490748458, 'vQGhQQoCwbRaDztQVfHHnWsYXLLYNnQvMRnCcHhvWzzQSbCjygQvEtHdcauDvvuoblAauPURLJXWXXioEWb': 0.0001666944490748458, 'UXZByOWIszMWHlgpwqKfrByWjRUkqEBjkGcuHhmElPoEdJnAnaduTGuctLhDkbaNnIzSbsSlIDJKOLlPfCqTpAVxXEEpmetwxQUyYUEtqNFjxM': 0.0001666944490748458, 'ktDMgURcnVPONvGPGLFGSVNHeIbbQEVeMrcCdHcbTovueYJWnqAxTDldEHlhuBOxfNBfeTlIbYLNWbWpGHynpFdjnJmxunHhVMwClarZALErF': 0.0001666944490748458, 'DmkiJPLKuhjkrjoSIwFHNjPlBVFUPOlXqwIYiXjStDjjTFSBssVCAhmIbHqECsBKinCqSFUAeAkJgc': 0.0001666944490748458, 'uZquVzmVLwFGWVlAfRdYwXlphVHgPUgKDOxESXJJOMEdKLiqQqSnKtRsCttWN': 0.0001666944490748458, 'jHoKWYvPaAOrYagQgSmivZiUyXPQbfzXAGseNvPPcLXSrWmxswUjdjFQrjavZIeiNBTwGzcSWSldXxbLfKAmp': 0.0001666944490748458, 'cHjqgbewdfVwZKKcWmdXWbNgsVHVxtQtzv': 0.0001666944490748458, 'SlfMxZxRuPpuvNnWAIgfYkrJSLheoyDbkkklekNPKqJTlklOmFAlgmidbbgyUKIvdZzKe': 0.0001666944490748458, 'lDgohZKUCzynRQqISha': 0.0001666944490748458, 'GApNvEJYfIunAuDnpNQqnDGGJcsxZekPMLIMNrIJYqvyxSNPAtihRYzjtjAUkhbBwnE': 0.0001666944490748458, 'JJcYWdVTdOPWDFaGFaasdOPcZBNcVYkzZdoUkMsGgIfWuPKSezmIfjymJkTRLrVpQOrvplitRfIQggBOr': 0.0001666944490748458, 'YzhPjPTRYBTquWocTjOkgzNgVMxQGwEDuolEXtWNzdjSkFnvMbo': 0.0001666944490748458, 'YvGZmVBFJeCIGeoQDijsboF': 0.0001666944490748458, 'tsNRDPOTpCefQkfWz': 0.0001666944490748458, 'xIVWiIHOBSvXqQKelCitZbxxXlCaTNHqGBL': 0.0001666944490748458, 'KaICwglDlInMrxAXitGcqIKOM': 0.0001666944490748458, 'BZemzondjQULPygRqDJFGHLjCwGSqKIIGbUPwskyZqdapFnnyBUVVmATOcjwipVUeSDYPkuKbstiiBKGqBCZMlXbiFjLdQWIihdhUXbaMCufvhU': 0.0001666944490748458, 'gQfyUNSAlVybeTNIGB': 0.0001666944490748458, 'xEsKcfEgnwAgjysUqCHMwOsLLPHtAgLiUsfWdbIREaAqPkaxlHIbqrwKVTXUxPpAZfdgjMjnVUAPubbfaU': 0.0001666944490748458, 'uBcbVeywlwibBGmNOYTbOehHBdxeVlwdQuHaNA': 0.0001666944490748458, 'ETtcWjZLG': 0.0001666944490748458, 'LouwWsOhpLjITrNhOQGZPchcyaQpownzCgSggSMETItXAXCOsOHuLxakzQlrLmy': 0.0001666944490748458, 'kQIylxTJVOCxYIvjDpkHDF': 0.0001666944490748458, 'aFQOifWTLqrhlhKZVDzAhuYOJAAnPPTQFcmwd': 0.0001666944490748458, 'HaMKwiLKQpcfVDAIcwjgzIJcfaotEcrVrVURprhMUbXasoaXpflARyKehUWbidsufhzAMqfwnyFjowbfCIttNWvzpfzREKPMuis': 0.0001666944490748458, 'vzARbZGIgDULTvSgXvRIPIjHqdUDZIvalfcjhFftLrWMMhlhYIOrXWUwDCraokeYqHomHMUxZfzIisvJf': 0.0001666944490748458, 'YehuuxqoVsAJvdxyTRdHFmTrgPYiASiUPyiwDuThHrsYLAdZioQWYrdRqmJQTSzDqVtUvRfEPIVyfLAtvVmNbrDWcvsSDnBCmwQlZUAIQfkgIcHXYm': 0.0001666944490748458, 'cTMvIRerLWpnemKYqWuWdex': 0.0001666944490748458, 'RaCUUJncgPXHNeKqRHwKPupsJtQVBrubLjaaJnOAnmxuVAgxopToGiMoIREZxyvCx': 0.0001666944490748458, 'PXUdEiWPEKbBjMHjMIvdYyCRpqRhBHHsxDlTJoidFjWfnJkizgZXqiJKsgOQUKrIgMFXOxbHEjYLWEYIRvesicnvfREgKZQlABdHyNRdfQLFa': 0.0001666944490748458, 
'hqyPMeSdPjbjOUDpeMxfQaMJwsAxXTjgNyDIlwbvSsu': 0.0001666944490748458, 'xCGLlbaYbryXtSBCpbRlaCpiKhXwZgUiqRRGEWWIMBxrlqYCvWBIqREEYS': 0.0001666944490748458, 'QCFOOmnJwxdtnpWBaZJSlnHTVvQ': 0.0001666944490748458, 'HxPROSMDOBWIeyELUCcLjqjYpEHxrsQRW': 0.0001666944490748458, 'YHXsNIIQYZBBfQvWmlzFujqwRVkcu': 0.0001666944490748458, 'RTongvLNQRlMQqCRFdtSwstbcrhMrzPIVoWyevwkjxW': 0.0001666944490748458, 'AFjPfrlCPYjkDMbVpZiYmsOdUiUcVEiBkRUsgQSGXAhGrdNKEiUOwlYAGjZkfOtHEXCFKGzctCTzDgYodhtbOEZEXVoPKbXCKCn': 0.0001666944490748458, 'TVeAvzClYzicvkjapyuAtfgSScnzagdicNfUBhnNOWZFQMmdxqzGOeSnBoOXXxHVDsBNelunrIuomkHYvOfABSzPapl': 0.0001666944490748458, 'oJtNPFwfo': 0.0001666944490748458, 'bEpuFvXHGmYLfosWrBGBWCjmEtMClCybgvOKiwfEXaANOyXdnmfehczHbQdsCbGpwoT': 0.0001666944490748458, 'irNcPepKohLyIUyVnRQaZxRcLPIsFGuOTCgdiLogoqaxadDQEyUNnRMIvhDuNhHjnooQVhUCZ': 0.0001666944490748458, 'MQjvjShksrIzEbiSKrOeHUYUmZqPfaZPljhbMxwiFUyEPmDkuyddCJdXleyrDENcWjfwVxlqocoAUyQcDObjGsqUPUveUrRJLIygMGAXBTMwLcwGr': 0.0001666944490748458, 'regrTPFiOxYQptAQEjZiAHmZKFPntuwcdXTgYDkdlicNkwIQnPQPEUDKvg': 0.0001666944490748458, 'LhIUybKCxIUTiOXNYEnfSPjnnjosDZFvdANqDGEkkHAGTauRJUfGhhwEwuUHBsKROOI': 0.0001666944490748458, 'nCpCAKYDsUptIVzqeptheUXRjSpanJjKcAYXQpKmTDO': 0.0001666944490748458, 'HlNgqqANdCEIytYbjEANKddJFoOmGLqbAclAWBpLhcxvoMtFdQhNvPzVOjYgvdbSFLBRAYNhdzkEOtCSSBxGgHWZcZYoOLVaLVS': 0.0001666944490748458, 'HOxtlSnIPNkNKmTmEomffHnCfdAUFXcJMsumLnCHzCrMbfNKTYtoJRHrMLXnfYtvqVBEzNXxOLfBCSgcbftCnngIXAUfiPsAFabzLjzXzSjEVg': 0.0001666944490748458, 'JvxzKXqkjXCVulQdotpXspACeMzmFlWJOWwVWYURbhfyPhcXUtzqJzrvTUD': 0.0001666944490748458, 'IKLMJCeUrBoaBedWSvLSIZIUtoFFXujBULsitVMKKrjCQZBnqCsMRayvTIhACtBkw': 0.0001666944490748458, 'RgdbPnQjnuzxqvLSsKDzdKgFWWoedepQdzaVFbaYJEklZNMrnmn': 0.0001666944490748458, 'bBpQpMpuGYNzsxMzQnCTytBgaqQpeLzYgEtIAOmStEMiKZkCHhTCGJMdWuALbXnhrjNJCmEEeawVBfaxEb': 0.0001666944490748458, 'HhWxvClrvut': 0.0001666944490748458, 'uzHCl': 0.0001666944490748458, 'bdquLWhfYycHMgNtZVYSWWVQlpysTlukxRyVBilSpAJbavowpwEheMzYZOoloefAEhJvZWiSCBnooxnaqaOHrytqzqrUPnYapj': 0.0001666944490748458, 'mSWjfXXGhenzxMOIDCFlPqEFJhqivejwXkHLCDFIzzEnUCGNxtDTzJSfHBGCezaTBmYfXNQvOQcYZOaDMxFTU': 0.0001666944490748458, 'zCGpHlFYLOxAqJWhQqfKUkyGJnVCcAmBrMCTixtCNcAlWcjqjxaEyguDShNXaCUMXvSOVOApXabwUngbdPKeAWvWJcyZsMVtZAwSdzKjrfq': 0.0001666944490748458, 'TNtMMlkBtdnxKRJiZcPQuuFucbXBopekvgEjDpCvDgsmACkjehylNEGZztiPNLRaoGKnjpirdvlheIFquSDJFiuayxedfeGSbFMUTHATt': 0.0001666944490748458, 'GvkxGgVgVAhRzoGdqdeQLEyvryFDTheTihUrwzYyKVvxLbepAHxejFvtKSILlNdWpocGAoaHK': 0.0001666944490748458, 'WVpSSMNGmKhjspKyDkqQRQqYGtoCVXRIfQGoayqwTDWOnUupcjobWqoMcZALBDHguQwjMAnVPgPzKbtEJLQtSubFJdJlcTxvMouKkchmdD': 0.0001666944490748458, 'AZRsWyTfUUQqyOgbWSaIIwgOmFgLXrYoFMGQyySrRnDWFGFAktUPfkYRQseXsvUusvA': 0.0001666944490748458, 'qXsFuMvbLjXTLYRaDWltiCRQacQQYqCLmvxckEOSACBYpshmeyEuMLImNWLCbNnNQxWdQAHlUEqGcUzPfZIfaPRfGQXEAilCHrigWCjxmPOmhNB': 0.0001666944490748458, 'BnVovKzkqsOBbMjkCMpGftrAOgRqXpTJavWGipc': 0.0001666944490748458, 'sZhlBlWRPdMMTzkrptXyMYLdyjUckmRERmkOi': 0.0001666944490748458, 'YmOkIekvzKlfurVVYAMljuxgfwEhmvKpbSFwMeKCvmibRiWITwGNKYpiDxHTmcmNaaqCLkLMVUNRaUEMkEqRpDeFGznbbXljB': 0.0001666944490748458, 'kGOWWXoNwgcLWRiJhDwQtwUqNwtbNvxUYtgVaecbltdlXziZCnIpvrYHSXeDrJjnEnuTzZMYiYytuWHMnItTLMODcQbIsTv': 0.0001666944490748458, 'wgoEciBwMaqKyCudmssyCToSIFlzFtedNIKHLaWeHsCLPMEHuKYMaycvrbyHxxRCInnwMgeEGsvXlBQQQo': 0.0001666944490748458, 'ByAPSsYTQeGzPyEbeREEhfeyXQfetMXKGcwQLkxByIOxlizID': 0.0001666944490748458, 'yGnRCkPEjyLJljkWOnJqXcOlPFUzRbmQaIWOGDzoRo': 0.0001666944490748458, 
'vWWbfQIwarPKPbQQzTLuhiXxEdPwqlYKrgvLZWSjVerpHorAVVmJUTs': 0.0001666944490748458, 'UnDMLtqlmjwSvQBiwrXBpOTZpNnOBeXGFyMEKiheAVNsYutEaZGrwC': 0.0001666944490748458, 'FUDZJwgRWkrCoCGfxOBOGMqOcAHJvNjqIp': 0.0001666944490748458, 'inVjoHApJQZURyZVUEKJzkWWVpuvkoQXwpEhqFsPVFN': 0.0001666944490748458, 'FjzDsVnOXqhwQrAScmWnKOrwzkHUIUkskKyteuUrbBZpBSGsIYARbRKFRAHWFlzFCMlsqmdJkfoPhJvUvhtrIWkZPAs': 0.0001666944490748458, 'ABJnoLUTzDPxLKaQLZXFdLenyQtOEpGkEvhdeRjkzcsHoQzRrAxmxs': 0.0001666944490748458, 'tOfRmrjAlCssAQgeJwdRKmCHnWeifrbSaC': 0.0001666944490748458, 'gPwHvruDsAvOGOXbVQgtUHQuWsIastPdtGtZmfHHTHqUnqRNQMirNVfWpJXZupCKlLBlwyqrdncUaEUMUsoTirnJzvHwegsntGjSdeIaOkAMbfY': 0.0001666944490748458, 'WDlHOffXbrpvOKmRavJNctZbeDgdlbwrvXJELulIS': 0.0001666944490748458, 'qeQcGGHqhbtwpaozAOAllVfQwkIBFBWWTevMhOgeklVyJzZFHPFuQnbwZaJIVQdRHvZnErmctvNAYTwRvAtjeBqJkQGnKEzUMLaVvVKdCJruJTzzzHNGHLM': 0.0001666944490748458, 'tIOcFGfezdanpoBaBIWZcFtfOWJMhtAvT': 0.0001666944490748458, 'ZrBhOcqoLCzcwFhLfpFizHNJaInrAdLoyRKyNXupmMvnwWvEnxuPLrxpBaPeAunKKoPAcRZQfjBfprKsrgw': 0.0001666944490748458, 'fnLVhbAxRbFGSwnqxCbwYU': 0.0001666944490748458, 'hWPUFcJJhmrydshunuumurFNRESCMHtcKYXeZdbcTIEtDjpQrkXiKdWiuVUjcnHXnONqTNtNVKgTBWuHxwWtnvWIO': 0.0001666944490748458, 'FNrtJWoWkzlZtNcJTUbBdzGePoTCTGqpHZQBCUgFJbALQRRRZqoImKGbnu': 0.0001666944490748458, 'jMNSwXEqrGJCxH': 0.0001666944490748458, 'tnI': 0.0001666944490748458, 'rjsfueqAgwvvbDLvMchkmvgohYCpHOhtZkMyvrCBFBjzyBbXWJNXgwjToTcScIRCiTgfeGKCuej': 0.0001666944490748458, 'ujTbpmpewPNEknectgBEfIdOQEBrGMziyiLDLPATuOpeAdTZDeancyytISGliHffu': 0.0001666944490748458, 'WWgULhLCIRjAsYswrTP': 0.0001666944490748458, 'tbMqDFoxGxccXNiWkrZcZNzsIswxOS': 0.0001666944490748458, 'YGlezfjGbhLzIwbfcZPfxBLiEwoNPFPybUMeRTeVISWmgIBngpqbhVCieHTErcUCdeZXQOIWnAznffatOq': 0.0001666944490748458, 'JPUPUQLmVOVsNzxvtPxGcihwocBhKrtqDZCSBaizmAwRmGCPaWIcpu': 0.0001666944490748458, 'HPHbxBxCRjfqSyxVclUrHdEfUiNNF': 0.0001666944490748458, 'swHgbleELMpjjbbGExeNBHMQkJclVxJeWeGVUKhutMOAdZfwYiRrodbAaOSWDwwfKBwBwemBScnoddsVmBScBotPl': 0.0001666944490748458, 'BBwbQvfcLJTBgp': 0.0001666944490748458, 'FXlnGwAUcPziEXyBvoaFcEpFqhehGGvEwwawSwFPuXWKWQJLLTbQNy': 0.0001666944490748458, 'ivIdyv': 0.0001666944490748458, 'JHf': 0.0001666944490748458, 'hfrbHqgCBWyrrbjSvAyEuqmyzMnTrwE': 0.0001666944490748458, 'sEYeefVkKWADTkVSiWHFkZVPTvKRJtBdFGLLElEGuUgrvIVVObnxfZkDCQNrrdwjDQTBhZhSLiQRPmDJcaQVNabaYYHirSpSmNROyfc': 0.0001666944490748458, 'VutlAkcUUKuHpbppTSWFOvLXJZCfBoXKwqfFvGXprOLDNm': 0.0001666944490748458, 'mDRKEHbllsdCrzdHzvjrzZpjbguhtusFw': 0.0001666944490748458, 'SVuDMdwPHgpJLUHxOLOEvBeRRsWFkdHzgLmJXwgXTxLDUutiGTlgmBvZbjlclVObnbyjylCFRlKfwAfghWnWEIoxtIkIfnwCngu': 0.0001666944490748458, 'qwYzGTwOonVwChpRfDERSFTZfMvQnwrDuyfyoWuPOUU': 0.0001666944490748458, 'SGthurwXQNGXqqJqdOToXcvDZxeYzNkBkFCKgWqWe': 0.0001666944490748458, 'KwuGMI': 0.0001666944490748458, 'sBIhOgErfrpFpsuWB': 0.0001666944490748458, 'CCUSgmzwBmZwvAl': 0.0001666944490748458, 'IHyTtrxDeJXYwqnuBSBtOxuzmrRbEsVtuabLFNEhjJR': 0.0001666944490748458, 'rOQWyuL': 0.0001666944490748458, 'TTZITrPSKfypAXCiQECMyjbrYpalugH': 0.0001666944490748458, 'DxNSJSDDKn': 0.0001666944490748458, 'AtaCswIWVYsVNTCPJHGbHhRElb': 0.0001666944490748458, 'RwqQUVAljMuhMAmnPRJNwOoyOQbJtRPZvGkHoolmpZwCnPm': 0.0001666944490748458, 'HStAOxCuyKQgzKjUQwvoGievYBrkj': 0.0001666944490748458, 'fzMyJQBvepfkERgpdhbKnAPvgDOiVEDUNEXOTReaqIuagaW': 0.0001666944490748458, 'naUIBEfBacGrvhN': 0.0001666944490748458, 'rZRVLjTAbsKZJWEWIpgJAQaniSWSQwkRqTPJYJZcJpXcVEtoYSOtfEwCIXACge': 0.0001666944490748458, 
'TmABMISzcbjAzzmxgVFsnpncPQAyVfmRVfCxOzMCgHmioPIdBoAsoEiruKCDuUACeorTMvLuLCYukAwfAKodi': 0.0001666944490748458, 'XlQAvsXzgDbzLdmXfDzLlgbARKtmBMjZsKKFQTbHtkXotdWfsANoEkoWWmpDkXMySLNDKyvjmePJRCLXHVgxaLrypRaYywBpY': 0.0001666944490748458, 'PmyPMrwMDroVKcIAaSNQdlvGVTxnYmpPsvGWBWXMNEDHZtUYYxeSHXMQBXojlflWHOVFXFEHKuNjXwHWlWnjNlHEYUJYyjboxqJztirJkhw': 0.0001666944490748458, 'gCPhPOzbSJfzAmeWTOswYwjJfpXvqcpBBfPXrTHUMLKKudwAyGqdWbGYTpHHELNyCwdMWHxOCOvgIVkuS': 0.0001666944490748458, 'IaBHecSeiSPwHUoFsAjHDFKWcqePHtMsZgFJCNBnOZraUR': 0.0001666944490748458, 'DvvaEHLAaEdbxkcJBwXsl': 0.0001666944490748458, 'IjctkeXZgxrFVojGINojYTQoyHuGZrKNiMMHpWgvrrXoUfNrbTMQcWTOLhanOudRhymuXpuhghTKjXzxqKRrABweudtecDkGl': 0.0001666944490748458, 'OZvUWeEZhRFYYSODYWtIdilJoAbvRbvOvcukzBOZcMfBFshDshevvNUyTQZRROCorRYkThRkOITzSiNyjwIHqmQqoNCsYQIxqQIMAURbLWtNlD': 0.0001666944490748458, 'AWBzPeOCrDDqQjmGIlRieiupKLMCJhgoSRLFEGfPveIIjjUsaIWFTziyPOSSMkxIWLNKrhjDgsBJCpLKgQkpxgxtpFVkbDwswIRnmWa': 0.0001666944490748458, 'ET': 0.0001666944490748458, 'rlUdVTskIXBjYftqUupoQKtDXZoAVDVBeUigOZGdjUjsILOMMnbChwLzZMQTpGFGwxEXEvFQNyerVaJQYdFIaIvJTlQwsWiFqFJvTCdCbWBPhCNfEM': 0.0001666944490748458, 'NqZrsWvEaQoEwqYzjglVmKIyLjZwlosfGluTRMRZxyntWccUkmVMHFnJJqBUwcRAittzvWuBBeRpxNGaZpWfzJ': 0.0001666944490748458, 'xPzorhFUJdzNdVDxlPsYPjIcuHOQcxLELRdwBNzWQjq': 0.0001666944490748458, 'xNeJfZeyqTEMEkCFoTeBLFrQvUNDkdIenenZAuRhMKzCFlcEYyFLpUolGGdgMRdXEGrjrUOCjLUnFqUrSgEuOpJ': 0.0001666944490748458, 'bcyzyZMAYmntWAnuYDFWserbXbhWmWYiSDqnmEpueiAdhparCPpjNMBqgAYFGGytlAGRJqlopNMVwIlfTPgjvr': 0.0001666944490748458, 'BguqIBuyBJNEhfelPoqwZTWLwnOtSZKMUaZZTNjhYZudmtUCBoLcIgbjuEmFCKQcKckqrrYxZzSktptlflBdwEILdlOwXsgOQZdfYS': 0.0001666944490748458, 'YSWwVNUJlLiAKKBRzwRzZrIFaHzNQIPvtPuDXOnthtMbpbLYYQoRzkOHMNxpdvexZSFOaFYzY': 0.0001666944490748458, 'QSguRcSdQpPoggqAJBMyfGXaZNGQomWwmrrlkJAulEiLfgqmaRDjNXjQlywMsrLdkLEVRfkbVMyFhacgxpZbRCd': 0.0001666944490748458, 'YGGVUWqjXlSDAwuVuJfTBcmFHnVvkVzQTBwzRsDVSyIvvuyVbcJxnmnlNKlYesY': 0.0001666944490748458, 'qhaBcpkdKaomYEKDANiccLwPJXtxxqP': 0.0001666944490748458, 'BjSXUbblnBMsTpgytmluS': 0.0001666944490748458, 'DculCHiGkfDQStdNzTlgiRcVtTuSmHwPOGSpxQAtTgTnTNdwEKaDUwhRNkLfcOckdbFXALatPmDAtLUMUWGFiE': 0.0001666944490748458, 'kQcOyleXVmGNgxChQLLazSyHumwbSoVwQiCXValkHLQbJMnhCnVbgXVmwyCnXqhyKdzoGieGEvBDwCeWvYHOoDBfsvitosCVgsbQToZAlbZCaOtbob': 0.0001666944490748458, 'VqbIIWSEiMLTGEbcUiiGbOfYJvszRXFBkDxSKxtLuETMbrcFYsSslWGlPQpypRnTK': 0.0001666944490748458, 'SbMBdbanrtznqJdLCgiLigHrwzYHqkKNxwkpMYgOaqBuNgtfDefUXZCKJGzMbfXjnksnatnpgPeDJcSmOlhnwxIpQIlytXktFeLQcBcMIHKoYntKQy': 0.0001666944490748458, 'sfobWyqODqSIBJqfGWNLbGmvuuYTndbVHIumoAHXNsMpepZqFSETnhnkhJtWvxsGAsYKeRsMlnfFHYCiBnojzzriiseLBuFhXPVSFLTpEzETwko': 0.0001666944490748458, 'cwCXmQzgr': 0.0001666944490748458, 'gXUDkIMUxrazmfDdMQJDregDVUIPVzQoiBGztqUmLZ': 0.0001666944490748458, 'MoEHXIKzOrLupQzxVFYMhKrWczhncXGgkCYfDkUJpSfvgvhiXYpgAqpWWjBPWuQHwUkQgyoOBBUfEAmpnGlrxTpDGnJbLbwOfGcNtsUFbxDcopvJtQG': 0.0001666944490748458, 'oKQTQMZitCFrYcDQhVqtsVyElVehOUYNOIARRmtzHihuSZKtMjvpgTj': 0.0001666944490748458, 'lHuPMufcZcWQeIYXCWFPXIgXcfmVIZfGUqLqAphvFeHTekxZnnpKPMDbfoImbdCVvPzpDROBFVVnbmMMzyxvZeKbL': 0.0001666944490748458, 'NQbtKBTTCTXmCIIvCwLrdZPuBsjvqRWRGEcxoeBbDOmIzUgJVSuRDuIWAWneHH': 0.0001666944490748458, 'IROpfeMEilYXeLhVzVOAKyyRqgZfXEtOQRapVYQwfqsMJcwEVcGasEtFSRHTgglgpjBdPEAAkGZFGIeQlqDmygRyEStnAhGDrlvlsGK': 0.0001666944490748458, 'PQVrTvCgmkrVtPaYaOnKcCZdyGlCbjpCGDiTfsUAXGNsuxjZXTqKTUCnPMiBt': 0.0001666944490748458, 
'ejPbJPKFTriwFaPlZXZGLMDGjPszZSmjEDPcQcDmNCnuvMsGvjnxCucCdeKcayeCNfuIDgQyPfVvGPDOpY': 0.0001666944490748458, 'NoJCoCSedtRrBpclbScOCsrdIQPPgXtznGg': 0.0001666944490748458, 'gxxgqCUnHcEqydLmhwoxHqeWXQLCvogXsDQDDWknkuhVjOXWmeHWQpmDlTIBlfmVKbseumJqDRDwYvcsuQuHCiDWUSxoWkRAirSVHFZdsHaANvwVPiFREnV': 0.0001666944490748458, 'TPquLshxgBEbCfvLWsnaa': 0.0001666944490748458, 'pcRJNNfnpDkavGwWVjglzvMWOBIwOpCGOZEsrPeVRTDnhpXqHKKNbdWNleZDCqjwAGNBFDjRykDOPerBCcgmComeCRUUPAgOYePUPexuG': 0.0001666944490748458, 'gVHoKgqnecDSSIEkhMMMUpnYPwWyTEaMulkDAJpmpJaRRJFdesOxHOeTGylRRiOxGylXupbURWjsMuhuaSIXQGwIyRTDxOHtrujBgUf': 0.0001666944490748458, 'gsalzfhEAozSDIMGGmXJtKKTLLrLezPdoSrxgdhBQ': 0.0001666944490748458, 'ZurFfwvlZqSaBTPxuOfKVOYajBpAEyGnAZimAEmoSvXwbMLseqCHEWkJBKyMzZPSHEMtyKeChCjkaBybkUYsotNzPEaPoHpsWUoRmgqcAlHZDRvSU': 0.0001666944490748458, 'UakJLOPaNPbvmZsYdcclgGogpYgmzKibWHQOlymbXFXCKFwuFXCMzrgcDYCNkpZQDItXlCaKZFfnaSvZu': 0.0001666944490748458, 'CTFXoiIhXnOGxIDbHEArDZmjnVwseBCByygWKJW': 0.0001666944490748458, 'oAuEUWMlsewXTYFfcDeJQfdKEhOtflDdFTtQbqHQnFZZgMGeelfdUCqqFKprTF': 0.0001666944490748458, 'jBCmzVehgGRcRkDuxNiveFqidRauJIRANcmgAvqMAymdDojGaLBEhsNJreveZDSHqrhCrvdLCMQWYPLSkfLLsejPwpTzUwqQEubQLi': 0.0001666944490748458, 'zSYTthlqdovxQkMmiwWkovmoSIfTmpaZJmiejCpKUKSggauEbKOVSPXgoDPgaWArlJwdTEE': 0.0001666944490748458, 'lfyNmfhTobwwCShwuxdIGuvzhN': 0.0001666944490748458, 'pdpvwLaYYPypgShUFjJDGgOiouJnhgIjqYSPjfpjoVjRtyaCCciKkxUCzjCpGpCTyKcfAHlPNzMctcCxNztccvtRfgPDLmPFip': 0.0001666944490748458, 'XYxylt': 0.0001666944490748458, 'brbRugfBFBZbupkBssyLDCSLGvapRpYdZvhSjfEQSg': 0.0001666944490748458, 'rFZyIdXhckgDqZWMWTzTrbiYjspYcudCwJilSTvGPinnHaTnTQ': 0.0001666944490748458, 'dsbpHPcXXAcawTymbUGzLIvAFShXJvaFjDIUCDLERxOSGWNkxIKjwLRmSr': 0.0001666944490748458, 'EcoyboZvYsTwdQcYgNNeKMcVRQMXHNZLUFyOgkHRSMQgrkcBDucsTJkZvazQfIPgRPMFlsz': 0.0001666944490748458, 'zGnZghGNwVLABwgqzLGEZGKsSBwceepTIbLHgytZSIywPVFTVzXbZEkGxjQtIASBLCJFZItnAXPIPoKrrMpuxPIrPntccwjsJYTCWnlCfnhBSoS': 0.0001666944490748458, 'iJLisCnozdexNqcdVNIyxZDxRvNvMtWrRVKcjyboPZxWOnGYPwWePdaxjVrSWHYaufLlUCUKZEwrfTtiCVVrF': 0.0001666944490748458, 'KhCPk': 0.0001666944490748458, 'cOVpRRfJfwrlIPTBxeenlrdTKkYncxZIFaIEYbRKgOdiMtWeFFDBMWz': 0.0001666944490748458, 'JWyqhIgljRbllUtMkzKgcoAlDEDmBNMHdNrPJkuIcFLRDiHrWddTeFlraFdvuAABxFOlSTrMBlxRpdOZZyzYdZykpfgIziOBLWItCw': 0.0001666944490748458, 'AJYdQxxzbwLGXFWLvoHDRcEGaArAqyTIeTrnUUvbzLaxsvDjcJjLLQCzrIXQWQoWVrUVvUoZuOCUbRTcHpCvevKnEkWVKhqHzXcUfDfTKeuEtxrsuBS': 0.0001666944490748458, 'zkpkaEOtZiurSBiKhBrQlhVMTVEtIItLGoJTTnsJyOtuwoTvgtHaaRmRNNOLIvnnmUogBBeRWa': 0.0001666944490748458, 'jkPyxyAfhKuxFdxluPYfGfRixXBkDeoXRoyPvElbWuWTNluGNFBlqzufBd': 0.0001666944490748458, 'agLNGlwDxKoiofIPjhNhyWJAyifmbSUeKveeYmrTyRUoljLcxOKoSRiPCNGZMwJ': 0.0001666944490748458, 'lQYtfNpkbuwiWNuoExFixgKuSPzXBEdVOslmwcvUaZYHKoFmRhvnAGNXWClWXvLMYCLWjGnZj': 0.0001666944490748458, 'KGAbJdDmTrdVGoVvgoTpSTkvNxXarYehnrmNdjPSGJ': 0.0001666944490748458, 'kB': 0.0001666944490748458, 'jmCXYPUuUnFFfOdIWyRYWStPKExWVApmnqudDYbWNfftiSEKTOXdnnNAKSSJjCtWb': 0.0001666944490748458, 'mBPuGfKzqyuwLXEMWcsFGEFOmlzTtyrgnGMERGXvgYBClXiIxjXANjOirvWrkjhdjSjsoCbSpZYxRLaeOCotbHXzWDRsjK': 0.0001666944490748458, 'iZZbvihLmUHlSvLBviHzujbHQcJSzozDNzWZroVSHVQtOipSENxMWznyFwEnxCXeFXifUtZEZJhnAxAxJV': 0.0001666944490748458, 'gVcGbBcZEalRdm': 0.0001666944490748458, 'mOHukofzPqpsGYqLrAMJrJvkdArSczUZeOGKVwOfqCZxWdyPaWUdDhJvfBUvsxotarThBhOKJVIoMolvJvokenHydWGuxmYHPDEuAUyWK': 0.0001666944490748458, 'psbCZBCrQqzdGbNBRLLlqIPCdoJRDQBkriiaKLsDxqIWrOClUEBUoRDzySGCRvaOZoMEPeXlBaoFtSkOd': 
… [elided: a very large dictionary of several thousand random alphanumeric string keys, each mapped to the same value 0.0001666944490748458 (≈ 1/5999); the individual keys carry no recoverable information] …
'yTfwIeGHAklcEEHWtUdcZBNiJnTPgFTLMGdGbAaJmaXIGCWHRUOLHyQQESOvLgTpoSJAUryQBlrGiuiagOaLKvocQSqrblGxBTrDejcFdvFBV': 0.0001666944490748458, 'iLpHiBbjTvRYIIVbhDTIcNyoPBneqKKBMwTVQVMzvuirwJbmfFWVfoWiTzEMAHSHcBZTUUcNjEHGafXDgGukD': 0.0001666944490748458, 'qilqjfjWjZMGvwksKELSOvnVsiwmEkyILatXyLomSLvNfMNpuMcyAGCUiDfBrtlxEOPWtKERvxBqqE': 0.0001666944490748458, 'LVYgHehEiWhIKbMAdIVRRCbRKfxzKKNBZnWzBFQfNvg': 0.0001666944490748458, 'OxiwIEwWUEcnAXQGLxUprggdsYRpAnoiVHcGepNGMAQsVrVMbNCKT': 0.0001666944490748458, 'NsIpabfFjvxONHpnfHkwHEGNDnhwDbyPisVGFuaJxfxRWjBgUuqEhdKfVuckEajqIZVkxfbxngLQiXrXthHQJjjwPQyBO': 0.0001666944490748458, 'BJeOHrhaeVAuBmRmMiFDy': 0.0001666944490748458, 'QRbwgwXgzNmYwZcjamL': 0.0001666944490748458, 'WyPJxDHWyijEFOTbZvThlQeLcCNRGWJNzlOQdDVSqLouHIxUYLsuJNcLVRGzNgIaCxiCLwoIMe': 0.0001666944490748458, 'hqwyaSBsmSalU': 0.0001666944490748458, 'kkJXIi': 0.0001666944490748458, 'TJDkHfSTVbHSugLvrrIHMFSjzrPAOZUUUglQbsWQKMaXevqwOnUOgAPGR': 0.0001666944490748458, 'riPuHcBbabYtZNcKohZAGuyVqHynQWHHIqvFVgiUxhiuecAapAMovHchHrptl': 0.0001666944490748458, 'jaGxBWeqeuVYXPSIhhNWrYLJpembiyiZLHupjieZLcxvPFOfZWFSHvexwSDzLvmdxHnbDeuMDbWdAqfWuYoqCUFMeNsMgyNlZDhTtaVAIHRiwPNDT': 0.0001666944490748458, 'PXSUgCDhCwammmBOhPmqoWOPNLhccUOGfhTOr': 0.0001666944490748458, 'RfjUNVGyiRQeLoyllciJvTmMXTHEx': 0.0001666944490748458, 'QgLwLWfialdoPWcraoGouLRMceJzdxueweFymzWqQonLKJVqZXguutiPirmlOsFtaYtkzTBddtWjjEmfCjSUNS': 0.0001666944490748458, 'VihaxdGqnFyDC': 0.0001666944490748458, 'IRbsmkHJwMylyuOUEhqjsSRUexDXADosnrhYClEejIwrNgANjAwkNfwZGktEovYJNT': 0.0001666944490748458, 'fyqvllRgBpNIyFQSTwOmKQrgkzxUVwdzdaeEg': 0.0001666944490748458, 'VuICGkseaQaEJnVdwgGYsANFtDbJrPcqVYxzpk': 0.0001666944490748458, 'TtiQmZNdCkEfuWlvUDbmpPdEHaQXrBKqsKxuxAKPIfeNxMTbsLXQprDfGRJFhLXlOlQapVuerjMmubZkjCIDuWvxsemJsxpqWYZAuRTUmqRbtVQyVD': 0.0001666944490748458, 'wohfqbtRvuyuVyuaHUjExMWgHmgVvyRZgvtbiVrOskRIlXcMlYhInwVBNfmXHQRmFzUmqouVhtymHEWyiCPgJioLEevKZXUPCXlMwOAwqhJPCB': 0.0001666944490748458, 'PHjAahhwrOKgdXUbRKraqcSMdzTMIdYrlqxoJ': 0.0001666944490748458, 'ZZkdxILwiZyIWggHqYjSXPhmezDwDHajhZpTBeYiKiceinqrpoPkJrzkFbFspohVEhvOyIVrnYERNbkdMWDJOhbEbwlwKz': 0.0001666944490748458, 'pfvIvAAJitvKSwEMeuKHpdyfbsXfowbiqYryycTZkwfDbAubmRrxJnqTLUfQLYXqBOpmrlVMafFJBjVBFNakHlqhDrPfeVyJQhHkEIKccygTExpttFsNv': 0.0001666944490748458, 'JgvVoJVLbWjuEDLygDstsqzXZxGPDSWVwlwaogYaCWvFVAFKCqPoLgrcLwbqKJqJdLRxEGYkrashEnsUYtfCrKXlTRtrvLdUxO': 0.0001666944490748458, 'IswBxhEUOCpVXjJOKSm': 0.0001666944490748458, 'UfrashLcyCDzUFplUPMEYZBFdRIJPYqtKWaAsRNDuceMPIuUonoejLK': 0.0001666944490748458, 'NZKxRUSFUbEOwDsAREPpjEWSBlozT': 0.0001666944490748458, 'AKjRTcMZGFokGueAjFhkFVodmgmeGLgBxYvIEVZqpJOuFTsJRqNHTOEZkebXkTmJNo': 0.0001666944490748458, 'sxnVJsXvPqDoaSHXuoOkCDkngWvSovqzDeaWyyUBtjmmrbluCNQpyMTpkzZByRvvsxHXYhGNpLgwyqXAMJDmOnNJyKFqjsHQygZxCPTWRf': 0.0001666944490748458, 'VYjGufgHkBzbPcMSLVQbqPHPusAJAipajJKEzvTgQhDKIisndDCpIZQ': 0.0001666944490748458, 'OWnyCMZViWghlpzcCimZvUxqpimEkda': 0.0001666944490748458, 'BIEyvxeqLebdYnYwYjKGvaeWkMpUpbnPvbsaBFxEHpQ': 0.0001666944490748458, 'LQUjpydzbpABtImPvBniWGRjZcwOGOcVjTGUJIJSOzCasnTBV': 0.0001666944490748458, 'UShJqEaeIPVNxUpgfHQry': 0.0001666944490748458, 'KMpnPKtkbFUPCcIMnCbLBebsWKPbmzZOJCrDXogWdUfjKvxpCnkVTqsYKZFNpdlOlicbdCHxnQlOhKjEndS': 0.0001666944490748458, 'vvXqNtYFuOvcvfTkgiBxvCobMMWVlGojUVukKCiahevZmEtTfVQluXzFWEPqrgnBnfVeOfztgNtOPcZxIHHpDGIugUtXzvnsO': 0.0001666944490748458, 'leSJAiZYdByLyAhenfsMhGdOEWJiXgQrXuIgkexGTWOjRVAVFdLGNGHhGZXUgUmcwLeBJqljjR': 0.0001666944490748458, 
'czvLbPJTgtjJIktRdWKEoCKGlBFGnKOYbcnhCdyYfFFVcuhkOvcqWRuDHGpPdVbinnzFKdtTPobfqWbKXTFphZyZz': 0.0001666944490748458, 'CtZLXhWzahEfPcLKJpRzrl': 0.0001666944490748458, 'iYMsOmaTjeeAtpFiJxYnWTrVXcfdfWQOHBPKQtkGiGiqymWeDhCfdsljPUVMXVBWwOXhDRmjFtHqECqGBlwDrqhlGUa': 0.0001666944490748458, 'KMZzWwGbZHLmcGwOiNLZMQqYsaeuesnKpdZJEWXWNi': 0.0001666944490748458, 'BMGWuPZVGXuRbABZfupeDjRbOJWfxPQltsJSLoDWiHnfuYUSBDKyVOuaDbLzSMlsbFlHODTquaRyGaaAndseLIFuPGDJzIYxH': 0.0001666944490748458, 'bfIldeoDCyYsubWGPsTbKBTOVYqANAdYt': 0.0001666944490748458, 'kMglsxptCNSePgvREilzedSkmMVzSUWBLRmsUniUnIwxVngIIlTXjqQLHpGvjzfoRAudWxEauZXnfAyuqQJagA': 0.0001666944490748458, 'P': 0.0001666944490748458, 'NvqbdwXqRBLUgYBFZPAFxwEeUMVZtgcPxxvYZykdmlVDTNMkdmXCtKvZQkCIXpDkLwJNDcYcFQcUmMGXBoEsaqaIHIpJtD': 0.0001666944490748458, 'A': 0.0001666944490748458, 'qxWTPGwovHafqSWOirESOdfvtuQVbERRbBrQCbkSrhTkGhZCGrlxrKmOdcSZIJJjOagcj': 0.0001666944490748458, 'IsdbUTYQuIHUWiKkOEIFqWIGrosOlxPTNFuVrRUWpaODdGzJwJlTZ': 0.0001666944490748458, 'lpAqyyGwhoUQmPjvvPkXOYTEONIWCOVkllbXdByZdDPIPLhmAnnKoYTJVoLjfvCnrTRtYDMkwRkXGUOYjoiUakFJiyVqYKjEnDDCkTbuvSJZa': 0.0001666944490748458, 'cadhZBprmgTZpzyiCeBrPhXcmosDFdszthIYmOrrzuNaRBcMZkYNRnHpIQXZDfCNXkYlWnjxRCszBneLFBoZFvFEQEiGGsWwNdRZjYIeofmrDzqNGj': 0.0001666944490748458, 'KaMXoEJDBpHJLXeKXLPsPziFMylXu': 0.0001666944490748458, 'cNrMcwvqYMvkFYUugyyOtyIfYRgddjUySScManOmzEIylSafVofADYPPwQZAnkJoHZkcArdqMdgFlcLAkIPrdFRGXUFLCELEfEWweI': 0.0001666944490748458, 'wrqGYOwujQhpCoEBRzNslQGMcvKSSnKRerbOraPAqqvVgAElgrbmrfDvAAHaMAYSoUHJdFCoWvffDudkyHZcE': 0.0001666944490748458, 'OnICHkSyjkrQEDLrXlIXpInNbTklHyuvryjMrskfxA': 0.0001666944490748458, 'nBzKlATGPTPwRGEBtAqSlguebWwDB': 0.0001666944490748458, 'HRmXhdynNFOhHIZfDCpxHibskvUIoRiMjrGcPzNazjUfviYpwAxEMuJRMUWyIEmVmM': 0.0001666944490748458, 'HavWVZRQiphHbfGdYaSmCdGzTbhqDilYzCwzlohtoGcVlciGcZauAcNvnzU': 0.0001666944490748458, 'pXJCfXbjRPFnGUTwOKtXnqpvwuvrzgwGoeyltSKbufEprnOikwclhIxjvlveFULSBiLVNmKysotqoDmWOiCgYaLqgZ': 0.0001666944490748458, 'uhlTxrAaLVroCH': 0.0001666944490748458, 'YFpCqFqcLrsmFWQTjArjPrqQsaAijiPBnUIVNugtaSzyTvoQaVDlZBhjIAuyWkhlptnSqQdaBhFlyCtunyMuntKOXmMvEkTrx': 0.0001666944490748458, 'ZrGoCyxSKHlWjrVqFhvRlJfkKshApQsJxrInJkPpUtMNlAaYAeYzYHwoEJBEIkixJjvVxyATVjQYHLaFsKGZQvDugXCZFZV': 0.0001666944490748458, 'gQBeFixeowqxWQvvNlNcxJxRvdlnGLFGOeigGCVMsALaBOLwNpoFZxXDRksAaBxTaiDoHFeWycBagmdCOWE': 0.0001666944490748458, 'rCkiJypEkQl': 0.0001666944490748458, 'KCuWK': 0.0001666944490748458, 'YflCkgWvXIF': 0.0001666944490748458, 'RaCvpSpiVsKyCDOMSQqwRZeQSnybzGgOiNCzrecHCXADUgNrKdxROotSypzDe': 0.0001666944490748458, 'rV': 0.0001666944490748458, 'ryatUslMyaARsWUQHJidMvHZEjiPixSwYEDDGnlgEBAOUzWXTboDWbHBI': 0.0001666944490748458, 'zSzvsjczVnaZeoJGQoxBjyGHNFsfQUJvFhoyAehxPzEYnhdhhsxohBY': 0.0001666944490748458, 'QvGerZYggVmOLrCbxgxzYUQuAejZJgyIahXdrcnDwsjNQQheZODCCWJFxHUOVFbJCZAmbChbOOTtVCrsunsyOfHkXoSrfMhsMZhPFKlVkBVqIPNAuXMxxkL': 0.0001666944490748458, 'hTdtlpsuPmrxJUNvVlbvNgdCVhlJrXRzLgCLXlyTy': 0.0001666944490748458, 'tiJUrfchofuZuHZJaMvMRUhOKQMvNEXwrNAoOQKANjUQVkbizBinDlsnhJYmKxnmClYYoVPfmoSLnJCUWOFGYRisQV': 0.0001666944490748458, 'EKtoraWbyNViILCUzUhKzSCifxjQnjeSTyrkmGutOIiXbgVAJEpoNfqdKgNPGF': 0.0001666944490748458, 'IAYNvPxZTwLVqDHTRDqvtCmeVyGhgpXPCjyPLEBnevcBNWefZygtoqbDdHeNxtCdZGKehWMjMrRDOaVVCufisCqeOEJVWIrsQpHEMPfXaMUAmxbZGwX': 0.0001666944490748458, 'NHJGoZNZBlMQehZdTHoiZNXYCgizjkdwnJosJWpKRiapgFQAcyRQKVGvYinqAIdVUIwAHEKvAoRCaH': 0.0001666944490748458, 
'vFvtxiPXBfZuEsvfmECUVVwQijPvaxFEgUqdBdXIJeGuTHnqEPxlwoScMEHcRUwJNqtWqHeAAWbQpKCzGOyKmLljMsNQaBdiouNtLvuedbBDexLDPZqcKHp': 0.0001666944490748458, 'UgPmuGnduavtauyNBgnsrxGWDfVblpXnCcoEj': 0.0001666944490748458, 'ZXSTSYhoJPnoKaHmIwIcouWTSNXmSgRuNlNOiUMkYrLkoCGaQCXnEBhoiLoNOxjHgAnnfBVipBNrbemacp': 0.0001666944490748458, 'xXHsVvTCfgKuyDHytQoThrSRhtKNjMohnOamxnBfZGcjLnGIxV': 0.0001666944490748458, 'CHWjgQigAVgAxNLTlS': 0.0001666944490748458, 'MEZpHWNhcbyMWmGtKoiGknBBASPmZRfmAIguPNRVVAvKuebDKXOHVTbNJwiSMOJUgnBVblaYjKTUcqOKNmtfioshhzsmSmtkPZmMoB': 0.0001666944490748458, 'xYzZiQukdGxMhImPAoHTLabLuMxUXYjSucEBgmHwyriuNiErWoETCGvUlSOqTPtTsiqesRBkYsvuOBAKOgmwVLVZQpHVkMPlNYhqgOzBokUFTAoOWCFJdtc': 0.0001666944490748458, 'lFlGvIxYqpAcpHRywLbRUKwchEOSPbrKffMxWezYTmmzIdNmbmDjToMZp': 0.0001666944490748458, 'CifUGPEDlcHJaLLCRDmDTNDPKnmPafqGFeKlchYtApesSqRdAlyUlTktejjSruQdsqiiXrzgoRjhsd': 0.0001666944490748458, 'afXwAHqtYcWimKPEMLNnsMOyOhrOOBJMcKOEXbFtkeiPPtRmVEtFrzdIjrgPtLSdWhYnVdwMPTNpwzwLszJhXFjUwxPLHPiUKLhOc': 0.0001666944490748458, 'AfgImDVSGlZDMtxVcjwRSPRuwFJpgbkVlEKkywjGaHMtTROpcMDpeIfoqCQaGzbFJKVSpYUrkKGJjHTWkpLWYtbWFRWhOGpkpoDudLEadhdXd': 0.0001666944490748458, 'UTzzzDNYdhRwkaduflseRdckURmTYdVskJYlZniiwhUsrLqFPLNPZJXDjJVvrnAFGODJiTnhQJMhvwzOaWDWRSPjCZdEyXqhB': 0.0001666944490748458, 'baELAChdDwnzTCuXrTnQEo': 0.0001666944490748458, 'MIZUECJMf': 0.0001666944490748458, 'jgxMvqurHcQpebBCCJacXqtIpbDPvTsLONdNNfkElE': 0.0001666944490748458, 'AScCTlgQDTJQOAKZKoNMKcHfDPHFNGdkNXcRPbiWh': 0.0001666944490748458, 'tVoKYJWVsgFbTRzzIYctJRatgvUTnQyMMJEVMYKTLfPSyPajACplBEhhxzmXbPxZxCngjySfZGDyzyNXfOIAmPvhzGiIDTXFmYPoZn': 0.0001666944490748458, 'ViYONiiItbGUlqCmhXTwPWrWYLXIZuQOWtowOxlwygDYRPsPqEeegznRfwccUzCsYfKpzPtxhlFMnSxCpagdkfjFXwnUoAmOMlYpyOlkuchKTkhxBUAvK': 0.0001666944490748458, 'NPyhDXiTRpUgYhYiRFGxmYxcMETCahiHIMTBwBopzNDxfItdItkTMTMnV': 0.0001666944490748458, 'MKKJSMYtGjyYZUrWTyhHhQVQgryOesxlanWRVgEXXIiiSKbXArUcNvfkCgJIVvVlsmWZsQxoudyGdrcdTZtXoDiHGsuPIl': 0.0001666944490748458, 'GWsGrkcicgutOQBMVJIzBtxTUmEYlxlrFdqAXnYWvZ': 0.0001666944490748458, 'BaaDwKvvWvIQfIFtpsijKRzsuIWIjPDBYRpoRCaDTtiAIGWrTyWxyJeNNDLudnbGhoydhMYllSywSgZOcjponPedEDpvKFtssmHDN': 0.0001666944490748458, 'zFtEkvqzLuvbdQtAXLKkXLmXiDXWsjCSdhaJCHWyXHjGzVjALAGjcoTIMaJtWBAnxxzOPyqmEHmhPmCLRocLNblyJqjLiMKUiVTKgjCOkXCPPE': 0.0001666944490748458, 'hzWrPKPhymmpNyRLapymCztcjKOdiJGpgfafIEWiHujofZkAEpOLXsnmiMkyajiclNRmenupwvAzBNJRbRDzSNeJXeZeBNoua': 0.0001666944490748458, 'PUGFdJexOAcrqDdVMMpvTHoSGdzInprQpOJIxmVxFUaEtTouITUTgZFroIMNFvcnTCuynOGYbGvMxBXmgcmix': 0.0001666944490748458, 'DIjHAfFluyRKxVTsELPwDduazoFRzSBxlEZnvnvWLuYFKLrqxBySbZZjKBXjzlKkJjlinomBBqIminDQkTOzEkiPscA': 0.0001666944490748458, 'hdmOeapFporyJBtoQSbePTsKmiYvgTEQkE': 0.0001666944490748458, 'tJEQRnWsJPEQVXKGIicDStTBDgSuPegcLGrsCdBjbDXmJohsPTs': 0.0001666944490748458, 'QkmBLMDRATDuaANOppcDTXyUmZrCBtkjmsIPvBLBOVnxx': 0.0001666944490748458, 'ZNhfyHOXwGbNWQCsUClnzFUIakawwgXVrpgIpvgzcNewLmHRTqPcKWOXzUpYJNtrFkxCDVIcmmIfzKbgJANvDNAIxXv': 0.0001666944490748458, 'SZDGLOfpHubuoVU': 0.0001666944490748458, 'VWAbdoDkLHNeULFoRlojYJhOtdZCmbJBLJENKbmEiETNBLEInqRlZZgRLXCUieuqAoTPLgifmSpKnHXERUORlkLewqZNEQDehGZSnwReATdVCSsyJmM': 0.0001666944490748458, 'wulkgJdqTQYtbqCdwBcvEOeLmsryukm': 0.0001666944490748458, 'Dbt': 0.0001666944490748458, 'ulczFTJLxaQDxCsNQFufiCJNtiBuHbGZclIpZlYetdC': 0.0001666944490748458, 'XBARlmxEZMUEvoFtSSUGMIYAGijtBFxPVYXcldlgHEfrR': 0.0001666944490748458, 'KhXMhhtiSAsTnujstqxcrLONFdQpjpLSkGVXXqvRKaBjqktjSigeJafjSHRQVuSJIgz': 0.0001666944490748458, 
'gXXIPweNXffzXcjRRYqMQhpuICLMzBtivqZGsjcVqxcpfnfEwnrcIXSlKSuqnfWclOtasZdJXovrkfnLRraBhmEASNZvHWYxWTJtjS': 0.0001666944490748458, 'XKQCTauZvIBbdXLXNwnleKLvPcVpUewpkYcrErljbtxbnigBWoNzrszHDzCghdOBDlpliDrYufeQdlZzAXzpugt': 0.0001666944490748458, 'HiJfPVyEOrHmQcDZnpkGztfKoUZchYRNcCpMGMIDwfeNUefIOQmCYptJDKYVyKS': 0.0001666944490748458, 'ENCLCBwCfAkEQUEBDfluSZCwyTumRbavyvzBIyTmsSVhxvUjABKURedsKNWLHAmbqkQvhKGyICEYpOxZrRxYnGuTVJjMAongeTTgVdKRjdmriNt': 0.0001666944490748458, 'sIyDCwbsuMnSmimhojmBlMIYkZJJFFxOjdUhHvRdHaeKETKAGXxspFKuHkzSTtdQifmSYCOUZfidRnAictl': 0.0001666944490748458, 'kOgQtXSSNJRThdlxEupVfOyUQOByJyyUavz': 0.0001666944490748458, 'gPWmaxBIuLflQDDaLNbxDUhDS': 0.0001666944490748458, 'vgVAFYUBzlLUWmrUSqdGlLHWsFgNsRNzGlzCmykTEEICbdBtYBAcQWINXJxFuFWLs': 0.0001666944490748458, 'HTWhsAFFzMfvMOZpzNg': 0.0001666944490748458, 'SmBfqBFChQEIoogTVVCBCwceMuwANUpJilKBKiRZhTdRuxataqbDuAcAgbBradYxYElznZSFXqkuuUDlqXiWXhKfIgkhtPjpVgAAfDcZzLvGfPfhfux': 0.0001666944490748458, 'vuXqWcitNzKEiiXciEtlpWutaniojaCoeCFPGKGCaZytBmZMnqHapsWkGGMUhGEZHqoPKjIYCiNShUNLUdpfCNCJnlNaeegQhGoByXNTdumovNzTuDJNHB': 0.0001666944490748458, 'XIYkzQUfswVkBINOaveDUNzNkxxWRPLYzhRSHAaqXRGqiSrsdtGxAGHyRCF': 0.0001666944490748458, 'yMcailY': 0.0001666944490748458, 'PjaRLnWHQIVicGYpYkYUTKBYTLKTmXfSPtgQvHioOtQHmGuLUUJrgOHCGKwLaqCTd': 0.0001666944490748458, 'ehPPhxeTldNLArUvoUEMPTSkwDBmwwQUbNpipkjTEKZcjpkHCcBjkEWxGwrOxbwXxdheXJOGnZUQZjhEnABjyJMvMOrFLwsTzPlBCMmEzy': 0.0001666944490748458, 'fPrqswMfAzYGiGvHGQzzAjNKAIBlSvefdbIazUgxsxfRaqbEJFlNGQChsNjCLaixhJwccOlaXYkCqNFHUECEZ': 0.0001666944490748458, 'zcKAnMYLSEqypBsSSNubfHXQTmBXfQMIdSraNhKwdSZemSTMlgVWIqbXWnWGjUpYPSWIHlWsfpBBuzaIdegSayupsxeRzaugznwhxxiBrSSjugaVVNv': 0.0001666944490748458, 'YkLzAFEPcObbeDyWMRiUeCtTKQUTWaoBLDWucTbTv': 0.0001666944490748458, 'oQBgcdaCAKMprTHjNoQmYFZbpVxsLDAXgTJevzRIcUXHKlmLuuRPYBryIedzuxBBdplRKWhrMobJyfrqNCSjiuuxSsU': 0.0001666944490748458, 'XXOGqzwreGGGrWSScjdzKaTqChrQXlwJuoNedfkCZRRnnXPoYgpFOTwoIHDrZZOosDjjHckvewyvUVhRQJQBaxxFmXRPLqfwzPAnRWVydJ': 0.0001666944490748458, 'ShYxSLGDSxRYMBwsYTJOQNNZmRGhePjejqEOxBkBGrThGkExyAAVyjJooRVpabVSpxHpiygXB': 0.0001666944490748458, 'eWTkMudjzbNjkAhfZSnEkhWpDuzLpJQNjRwjUPnknavEDGEcEZOvCryLcnSJbogiUQWDMFTHJmAXSBgKfmkKM': 0.0001666944490748458, 'mhzRQSFLXIskGBmwQdsvtDdUrQ': 0.0001666944490748458, 'dOlJxIgDLwXNmvAWGSLKpmPwezMXSLcNwDm': 0.0001666944490748458, 'qmlSJHEBawwzeQbDmbJRWFFfcYPHiXirlgWCzmiziSmiBfKqHLpHqmCerVfZtgRVjkXEOcduHlp': 0.0001666944490748458, 'hYNXLbMxgvTWQaJtuaVeAZdyPZyQqPGYnhyjmJe': 0.0001666944490748458, 'cWrQswNDOdeTyVbcOnaemWHgqAGIbYbjRQYTmOruDkpCbQqOaUnZRbObnlRFLkYABPHOJpm': 0.0001666944490748458, 'OjcCtCPysQVYpliLsasBbAeFEOSCjGWZLmcUgfIfW': 0.0001666944490748458, 'KpUKBMFyCwDwCMJQBHSXHfGBbAuGfsZBZMkFIRurLRYmJaAkBCyfKgVrckwjmcJrvjDQgRrWDYjynZPpJuAjrlcnUYZVFlrPQILSQyavqgn': 0.0001666944490748458, 'wYnWbKHZjZbVzaDKMZvAjNCJbTDQdwOsyntApaenPgomDRKePOvwmlFaaexcbFuPU': 0.0001666944490748458, 'SLUsbELLlnmCKScMg': 0.0001666944490748458, 'pUUcmjxZpnrVSfhbSlO': 0.0001666944490748458, 'WJVPIXcdYZiCcVpzgJjCrXWKyFmjQBXrYrEUvAMBGBxFqKRzzwSwOTzAhoyfQSLIxzPENwNRkNHCQRCsmABqtONztgPzbcpeFu': 0.0001666944490748458, 'niveeiUzbtltukwHppmRNrIchoyPeUEMhrsOjXXLVqtmnsifANdrEPwULsoFrraLFqtqwzhfVHuVVJGGgmjSZcPpanphqvDeuZOhDXSqm': 0.0001666944490748458, 'qDtCAzdLmtqjXOuCPcSzBrnKoJWLiktMayuAHepVxMYqDYAUWmTccCfqXhryTHbAPGpofqYDOPXeoJvhCrkLIVsqZPIOgEBZHIvszqmZwkXoPBwBVrZiO': 0.0001666944490748458, 'tQBoYiXHqcTGuCHqicpiEDaROPjaLuEwLdmlObwdCtBynSDsHWQpAeEZTaxZK': 0.0001666944490748458, 
'NPVTzYIdlDcVHVZorzlDvhBFgYwZGxoLLuZMKobmoyConTZaONcrKCjmkmRIjfeBgtQjLdyhASMHOsvDbnqSvAsmxCgkjHBzHqyejEpKDhZfuHy': 0.0001666944490748458, 'YWiETpUAzGNhKNhNciFuR': 0.0001666944490748458, 'QExLUKqHLFkreyLPzYvdkZhkeDpLnKHHCfXloKKSdVUdoSghosOyhwDsBrYCMfxgcuQLcVzYWabOirnViaSbTSjqAQaQpjlZhfE': 0.0001666944490748458, 'QikwoCkFrFKTuSsndcaQuGIOYGWLlxQPUreWxIqUJPfEYbBTyLHVrGPARiXJGBl': 0.0001666944490748458, 'xYrlWGzAszVWLN': 0.0001666944490748458, 'ZiUWKvcnqtmCCPsLaOScRKFOtETcmgGFcDdUuDYmuCkEqHNijMdtRCQPppE': 0.0001666944490748458, 'mJuXpBjqdhAiYStDHOEYvgTOMvuJEMjRZRkEYIYtclrJjDBrHQPyCfeDMkbAsjOsQJLgHVNdk': 0.0001666944490748458, 'wPBlRCQICIetYjakZgKcHywdVABIlAhYgYUOWlssOvrIszdEiJLCxyPNXLcbcgNkHNYvAiPGEwvzjxa': 0.0001666944490748458, 'VEsdXTSTbQlMuxPyhEoNjbVDzMwLknRIfYZsTxufjFigdp': 0.0001666944490748458, 'eRyBxPHddkSyjfaTUFidF': 0.0001666944490748458, 'EhMaFGZyuZthGQTXvCCEmxbqWNrpFVbpAoXkquKkIkGKhNzCIlCcnAjKXHdRPrCKRgBJichxzuBWWAxKigzIhnRGOyBJrlGXXgQNVgBgzvZsCrctQepgDp': 0.0001666944490748458, 'AtbfpxdWYmZpIZMEWWOyfmqlSuBsEJVoCmNIGllkjXLvElFNXscHgfNsWifYtlaLDUVUvajkBmVzUUWiOCHjAHYGSeDkCeQamSxEUebKqSuhhzUObQh': 0.0001666944490748458, 'NmFyFPAXkwKAxqQKxlsPWlfAmVfacHZAsa': 0.0001666944490748458, 'czalFYEEPupZcwCPoUwwyGhOcLcdRfyHAGYlezmfofwDbGmdtMewTYQfnVosGCnqDkEonqhhtYBwNXimmujjIi': 0.0001666944490748458, 'tqrcUATOgjVVdZn': 0.0001666944490748458, 'xaqQNHAfCbMgFhoUSxvlURmWwLYLTBJvoYfOwOnbbAogwCIihzwQgbkxcpVyQktqiiaMNINnWOYrhcz': 0.0001666944490748458, 'neJUnKJRNyzcMGOLROAygWjmbKaHWlUqFGmXFxuzdGRgbvOnsVeICzUFsVuNHdhJhQmirgrLzDZgyNNlgoJVPXrqsluAGzxJA': 0.0001666944490748458, 'RqhaHOWyRMdkruWJJrNqeEmYdcDTDmXbG': 0.0001666944490748458, 'EdFNDEZXFMczPd': 0.0001666944490748458, 'qTADgZgMIcEkjtXCpTqDILcTeOJIbDxpJFiohgXgnLuxpjpLLIzuDwjrhVaGCpUNbcbYRFLwjODvwZAYqHhJUHLhMvxnMpjhiO': 0.0001666944490748458, 'm': 0.0001666944490748458, 'VFNuWVMIlhrFhBsjiCPiuzCYfnToyCVBMgbtJSYGBxXEcuBuAMUFqTnriQVzIsvrZVEoQffznjrLSaBQNzeuzfSanocdW': 0.0001666944490748458, 'NgOzpjJQrDddrowBVgkQlikWVojAkzsORCt': 0.0001666944490748458, 'WvNCbnu': 0.0001666944490748458, 'gNLIjCbtNQgQvoYmjJwiRicSWSynZnYgWitixJmSLoqtDpIvMjdauXxZiCIIsNZcoGzPSDRWtYdBfSSxLCxxCAzmOIKuVxLBlErBUZQpnSFjwr': 0.0001666944490748458, 'nTWYPzj': 0.0001666944490748458, 'yZMlXXgVCROnlIYiTZrhUKjHKPIvhlgucMkHcDJPYuRmcUxnqthSBrYAnXnJhKpHdigzXZgYWZRMfFHwStcPaRWQKYwUOsWYNYXchdeAIsZLxeBFr': 0.0001666944490748458, 'lspPhoDGRubEahlibvMxWaUhiUepWOFsssTTcmOTPhcXtdgHvncCRpFBbbIDPyxXXApYFCx': 0.0001666944490748458, 'ClrDuLnxMudktuwFVQmLlQPPjgqFOHMNoLQlg': 0.0001666944490748458, 'CrloDzTzAylaiLZvbJnFg': 0.0001666944490748458, 'uKbcUtUHvRHUZrcImYyFWRpksgUmDxfqWTFGXyRzJLMXkqNrNQydmjPVnNXVhcZfQb': 0.0001666944490748458, 'PlrHKlQjIOfhYCdJXhOqVocXUNDmwYtPFiqOaZgrobQbveTlNoPSteBlyTQzQFWnFBhnKaOmBovNBuPoji': 0.0001666944490748458, 'EREcNtoIjzusqaCFUQZQBKPljsCAoc': 0.0001666944490748458, 'DTKIcOlibpKPESTWaRSwemLMTcSxiEqsWXzvcEZBPKXkDgnSmnzNkFeuNbDCHnqwQuwoEPWrcMOrKhjCfWDDMIKyWVMMvCIdUgDbKLpReluhD': 0.0001666944490748458, 'KugzzBJUwDVtzmVHxENxbtbFmeZhESWJzSfgMpzPlOcRoDeOECMfhNcYHssmnhd': 0.0001666944490748458, 'AESSIlQFQHiMgjcDvEGaXZUXiynApZfihDQGNgTMMpJovfMaOpIvbPjgTavYxMsJGpKAGVN': 0.0001666944490748458, 'mkSUYbRgfmnkCQNXsHmzROZEMdNKorUInLMYbygNeGzNJrGTHgzsMVCekhdKKlxkpZV': 0.0001666944490748458, 'pBJfcUwCjZitVyNYQKJnjtaygAvnpopvWOOxfZBJIePPhabuEdNvuMZmwPq': 0.0001666944490748458, 'PpCbyIMnalTFyKWVEwqjEEjZCjfXpbAJoextqxBYMVEURAqNcmpVIdVRsA': 0.0001666944490748458, 'XlHTrqQTRzffPMqTcWQNxVOLcBagfwEkCYixoJrKajSdkbGjtTIQrAcNhRolEginuCJZlVmqAiCmvANqMyQGKhHdF': 0.0001666944490748458, 
'ckzphdnVhnssvjcdtnGxuZvFGRKObYcCShxhwuGKdOUFvOu': 0.0001666944490748458, 'AadnMOhYkTnvgIpvXYJDoCQwaFwEsHjfDqZcbe': 0.0001666944490748458, 'QIOGDGIxJztsYKbSVKguPzYkAvyUaXSayydJJSuqsLZtIlkTzRjlhHyuTNlqRqclMYfLcUHiLeiGUZmBVAzgvLhgmCdJqKfwOOxLQpixcdVytvOESvF': 0.0001666944490748458, 'YDnvNoEzhFhLYkvsEhjVAO': 0.0001666944490748458, 'IIljyesxmVmCDgitaxYKQfJhfqhVNEKYmKvDfWDmFvISSgOSyQeyxoPSCxGZatLpMuTivqdCjBZFJHkpJCqORYqGIeGVNjzNNCa': 0.0001666944490748458, 'GJSVysAVvuLVLSq': 0.0001666944490748458, 'DUvvAgAQOKDDuIqpAPOjkJTqhryXOERfmefldRMYVOuNdcUusheUjurSODEZDZlvCOvHIyxmVmdQtcXUMoleqbKusKhgrrPMbtoUXAd': 0.0001666944490748458, 'dbiCVKynKfyTxAyAjrVIAVvRAAQoQjmzpkZVvYwveCCskazLLcloQQ': 0.0001666944490748458, 'ahsIHadjeUwJkbBmJtLVCfKPNoDwPxYFXSSawgxLgDmgIiFYZWbpVEjyAbZFUuvdHibaTtguGcyYctfFwEDAvpqNzdCZAGfSjwnMqEoEnDOrINziG': 0.0001666944490748458, 'JuyGBw': 0.0001666944490748458, 'JsZXlKaXOALunSfALXhPKuXEWatpCbOGvQRQctzlzbcbwZXCudXywTdclAPtgSQXy': 0.0001666944490748458, 'Qi': 0.0001666944490748458, 'IwSkWEiNeSeoLwVVsdrqSxhEPrKXKLDKZidELjUedSJltdCOlUVSKYjfDEQzyEYUmVPQbVBukVUpR': 0.0001666944490748458, 'FoeZtXlNyxRaDLKZEZenyobGtHpeZApew': 0.0001666944490748458, 'dgIKswvaQiGbgIGtoqvrxujNFXCAttMhHXknCEPQxntxqHHGuNsBnqzpPnoiaGzaGRLaZVNiAQ': 0.0001666944490748458, 'cTJZvcpAnmmOShNlhywhnqXcpleEjgxWycuBqctUzFqjmnLOMmToMTdDIlnHVNtIPyyiMjpMsDxZIznZYKgYwwvGLUzSkRqRSZFYAO': 0.0001666944490748458, 'yMtRBeilZaziZkhPevFIVccNcFLfaHlWTfbsnAYCoeQpZOZsHpdjqsigowtfIAYWtccZiTvjsCHzVukuXwwqiuxgTOaLDULrljI': 0.0001666944490748458, 'aFYVKx': 0.0001666944490748458, 'CkIrpCuwdCtBPBFwlHYeiTRoXnflhQlPvulGguJUHXUljKwKyhiRVEbwkrXUdSHoRENWtCmOSfb': 0.0001666944490748458, 'nNHhljNqfWkoxFMqIhzjMwACGNTgnkDBPoHIBTVKyprJxyjXysBainCXPtSVSsxnKiY': 0.0001666944490748458, 'zDGCGliZPISmYNkAaApOMSeiWCDKrXtpJZMBZqlIHtlJokyJQfeJBIONEZhBGcjqidaMNcezmpeNEshlgLdTWnDJaHRNenoRgUNjQxppr': 0.0001666944490748458, 'hnFXHEWvnTBFsVHGK': 0.0001666944490748458, 'WxIRAgoPsfuoBDJUzfXDUAWuSHHUjLDXlTEAeMSgrmySqgfQJbOGGCKiVlYqussRUHbdiploECvUbgBIWawcuUMeDNEYxvSdTkPSJCrSYKwECENewTnYXE': 0.0001666944490748458, 'wLxWyamqJdSCTodyNgdLUIzGhNtIxdKDmlXtkPNPqgaCJCHPCVdGZkFSkO': 0.0001666944490748458, 'XBJISiqIuUEGuEPoLgRfDFiyyRdedBtWMgWEsVRJiwSmkHyydcmNrCvkWyzubctQHpXlI': 0.0001666944490748458, 'geDYghWUiNnPcdkohVlGCzYYcBLxooxAvFmvAhhwqAooFirljQUFzOs': 0.0001666944490748458, 'WReinEwYeuNxkpPzdZpkhsXSDjvYTjwHAQxJHwZMEjaaYPSpdFkPppzlRNHaVYnBluWsPdgSsOtEJNxsAsCbDqLqqnTzzEZiUiWzEXpKJoDsDGxYuBUSWxC': 0.0001666944490748458, 'OZXgxlpXeVDgnRiXWPEhSSdRNtuSbCOEgxIExNTNjJdMjUSmmkpHhVoBpVCQsxKor': 0.0001666944490748458, 'ihrtVEPSRzrWPCfZXAKIpfOSYHrQixOGSTjroyKcCSUeTEENEGMkjHNGXRRmQjEApNunluefWSlcHBKAhAVLPfdvWrmOJknYmqmFThpugcWWmUhinpblII': 0.0001666944490748458, 'ScnpkexGIRCtZzkrLEbgcdvVRbbqKeCucpArcPdMbVNNbxPNFpDkLepXMOZUAWkkzHnWdJahOBDZehZphUAkYhwDLroiLUcWLMLhwiwxtmMAHCuJAQ': 0.0001666944490748458, 'nonrzTVVhunZnTNSJLFbddqwjuLIykDUhsKjMBUhzRJLOnDkaVWNmLhMVTcSgpL': 0.0001666944490748458, 'slvDvYGvgMlpFCWCuxtGmGMPKpAQRQHaDsQxCZdgRwXVujvvjZKMACUWyV': 0.0001666944490748458, 'pijmzgPifCmlslDRpZcvcdaAkEEIKMeAcYHbgnvXsgsuKmwHyhIUSlWdByVvNmTJhcslEEcQnCyhttFDZACnu': 0.0001666944490748458, 'yUGWvJRtVWcUufshURnjUSSPCFGEBeeOhuhiolKPNegZy': 0.0001666944490748458, 'AKGHKTQIuQoaErdzGgrzKZZGzCfbJlAgtOKOERWBuVgiTQLStyebjcSnT': 0.0001666944490748458, 'HyONK': 0.0001666944490748458, 'tjIAokPSuXwQdtwOPEtFmhnipOdFHhsWGrMCwLdxBiSyhGklWOMmdFGWMLHTkAoFUqSWuFtDTGalrmfaTfJUrkvamk': 0.0001666944490748458, 
'ZCBzRIJzLGUxFgUPdxOCaFKzQuIhxouVCGmqnHaNGqRemNcqkRfXprNzSOlbgfsDMLUJtuYfkMUrCeETnQEhmPgjyWuKeAxuZqNktaAfTWn': 0.0001666944490748458, 'AoxRzOzyu': 0.0001666944490748458, 'RPfDsdKDPkyXtIapH': 0.0001666944490748458, 'LjSdesYIXjuSTxYSpHpCeYUkjhkNVjedpFJZBL': 0.0001666944490748458, 'pfvVnCLuEtMsRMbnpVVMy': 0.0001666944490748458, 'dusmvffqKrbBKvKYLkeRfPvWDOswstjnUZTzfDdhYZKmRqSpekcZGqipZaLNtTVccqBloarpkeiknsQlSG': 0.0001666944490748458, 'scRudyehUPSxtUIrBiKeqjjyecedcXvxvidCbbZEAWe': 0.0001666944490748458, 'JugNtCiYjK': 0.0001666944490748458, 'FgZBDePiUngbjYUiqMJUmtXQGuNRktQtqpUdUhcjKKEQzpnAUgyybLoZaOLWTnUuvJGNQXsLdNXvxTEnXfJsVskuPVKgMWIcINOAeQ': 0.0001666944490748458, 'uwqpcIwewsiAUfqDpVhRuoYOcZKrkmCsZZyavOcYIMpWgfeUaAZxDYXcTnpfzRXxZDdwOFadRnlxCsYFs': 0.0001666944490748458, 'UuSrSgitkDSnJLnUHRiMNDphgRaWjioCAZiKBhgXvwdKuNJrGojNuAosxyQFYedlBbxuXXoZp': 0.0001666944490748458, 'VLXFEFMlnuVdeBVxAbxHZIvXyZWfwqoCiuFktFGPMCfSieXiXtmYIFqaSWCNyixBFIyjaKWCnjqYDLMYYjaGkZPXcOMrIUHui': 0.0001666944490748458, 'vXaqovhJxcYXiXJpQnPxOXjCpuQIrkuqZrVOZRogOnAtWKMAYTqldJykXIGVjBpqohbjpYGqNvFUC': 0.0001666944490748458, 'SNVdWodTQZCHKkBhPiTKIsiZus': 0.0001666944490748458, 'xDXcq': 0.0001666944490748458, 'jti': 0.0001666944490748458, 'CojtkKNhljDxQZmEZvGKTSiGFlUscneJjUcpvjiUibFSpwTsjaVnjnQMThgCZEeijPmVqtEeVdpBnbVXPsMJDX': 0.0001666944490748458, 'ngUMzaBRjTNCZD': 0.0001666944490748458, 'kSratsLZhGTFOSTPsdxxKsvoEnnrsWNWZhfKXEzeNTPRuQEuLKjcyhIyJqezpxVMnCXGtQjwRGMzKgjsHqBnTyVmndlXjHwvyKWGrWtaIw': 0.0001666944490748458, 'MAMdvGdUdzMRL': 0.0001666944490748458, 'KSVbswNZwIjEhWgQwVpSvKuLfkzfWzRRmBkMItPsZXZNtacTYMjlauSfthxrGniaFgigRGmeZb': 0.0001666944490748458, 'RbUvYSdrOi': 0.0001666944490748458, 'GCzfnwwBcBpdFDeTBghIZ': 0.0001666944490748458, 'LVtZHsTlPNQzZXXtmZfcPjHVxwqnuJXlfqSVksFczPuOFADUDeQLssPCBiX': 0.0001666944490748458, 'OkwYcmTLsNvMKPmiBVrHTRDpmIabgMDuhnZKZGLyYxLWIpGuGcIAhmBuGWNviMPMAXnisfo': 0.0001666944490748458, 'xLmlRbyhKwIqxmWLAmRFBdk': 0.0001666944490748458, 'zwXXqyxemfxQPdhxxXcKQlRvsNcEVJYkeVjjaNHicARdAlHKueGiZtTxUMNobmbeKtHECZWMDLCxjFWwJrMVrMMZcBYWcWbOFAvXXhBITRoecFjvFKrIgRn': 0.0001666944490748458, 'jPFKHrnMnONFiMECNdDgNfEHejCQifqxFdCmDPCTGGsYoULbjkamOWNYCCdxnRoZeOUHVQgekZASguY': 0.0001666944490748458, 'lIxbPljZsplnicfNqyNRQhxBepdzxBMSNNQdowAVdekpoAuXQDOqIEFEvKoZt': 0.0001666944490748458, 'DGUUBPUqEwtUYDewiwyXdjOWDbjfCcmJk': 0.0001666944490748458, 'hicsZEwHKGlpAXQAAgKemAmmnrPTEJyiLNXeJqYsLckhMlVVfoyixLVmzYEnkMZvgHWoncL': 0.0001666944490748458, 'XYGiwToHxehcBqsWxAwnzivgDcwhElVXi': 0.0001666944490748458, 'pNUuvRQOIPWcbRxrhTdPOTvTADUFllCHETWLqgWxFXQAMrxCrPKXeiMkDCIsdcoGVYzUyIZOCupcMsbRcmiucOdUEaQiRKNHWUpBFpEhscrVCpmjaP': 0.0001666944490748458, 'CCGvQlieARBWpbuCegMRPrqfJYPUhhYnPEIDbNJfZRENoDJTqOPhQuJVuEBrQpLRUKJYOzjWmuozrszZqAkhOJdrwZqjCkwOPL': 0.0001666944490748458, 'ZbSHsYXdJAqQWlvSBavlGsbJeYPmFfQdnW': 0.0001666944490748458, 'HwQwtjoPikEVhCOAdJpMnbyrXQWkScEDktedSqIrOwwItVjaDQbBTUBcBIjOV': 0.0001666944490748458, 'iFzPBZLhoJkZmbzbFgfDfaFKdKqavw': 0.0001666944490748458, 'QefnyTpXhrtrNvBugrAClBWHxBxLzhXmQGVrNVJzjjuLrGBUEZLkzKk': 0.0001666944490748458, 'MWkRmoqhMvFFL': 0.0001666944490748458, 'gUTZtISzHUSafCPTQ': 0.0001666944490748458, 'YSkJsfdOKwDsrzbVNcttCIspvdfAnhnwajMbI': 0.0001666944490748458, 'aEARhoBFkIKvMjJlOEzznIuzLHkHvShTmhNVBftldqRPJeUCYfpamkgVnVtaDKtZpDoPrqiXSAwAUKfsDkofEyrsVedvnMwAcdloqxPQlYDWDqfuWD': 0.0001666944490748458, 'uncmBUYFwZZOYnwGMlPmXnfEPNFHNyyfCrPnXfpJYBtVJjoogmrUZcyGWosKIPxAZKrvVfYqnnVrVJMZKKZdIpdfERYFQb': 0.0001666944490748458, 'heGGAfCuGYKZkAGvqerAqCO': 0.0001666944490748458, 
'hvtakeXnUcibbtBHp': 0.0001666944490748458, 'MSdoxyeTooqYVnmGbVQkTXSHCeVGOBTIdLKiYjsQGUdClWYXSjgQyrPmBwmmqgUadDEPUxstrFjOlsrnJIi': 0.0001666944490748458, 'WJVUjInXW': 0.0001666944490748458, 'tKLwiBOmfmydTobFdXnZObNCtGBtfhEvJdkQYvnILfCdYPrFlAaGCdcTuvxxxyVOHiTIlULyPlzBi': 0.0001666944490748458, 'dCuvVrFbZcyCoWieldxoEZUZCKjIdrPpDbOdiYIKEOAiELWfqWevSgmdsWdAibmzrxYekqCSXXCnxhRZsmHVvVYOESC': 0.0001666944490748458, 'WVvCkdJxKQCnOuHDcQKBVmjcRaviGxVaYFrPNVYCMwYlQcFhYjqwsuSmAOlFnlpQzXEidQOpaBYXsArbrnBCKFlFfNllq': 0.0001666944490748458, 'qkKMkxQUoxiEADfbhaDJFBOUaxuLkNmzmPaQId': 0.0001666944490748458, 'tzOoKyCMglLGtbZpZvHVIZHXksOysPJuxHrFfmtTmezocSFHTipYPEoHPXHfdEFCkSytZIDJRoXnpEALjRgAkpATAn': 0.0001666944490748458, 'rwdokEsvMYqzAgIVPohZmVJzezMByNbqvlgaDXUcphgNcihWnHNQzyRHMxHqwrKZfZjHTWlwwBF': 0.0001666944490748458, 'QgXwPsdVrMTtpNFEmTttECibPfebaUcSXlxWZRtyITKMRlIwqOLihgzhEDouarbtsoiNrZKFhFfkSjlTZVfhOqpKiyQdlTlwwnC': 0.0001666944490748458, 'IOmQViNHGWlXFfGRmdMetvfxWZCTDlmepzoBnGfFVYcgOheOEGkSCHrbBezwDxs': 0.0001666944490748458, 'H': 0.0001666944490748458, 'MWMNojFDpedaAkNIgbQCfQuTsUNLpqOxtrEYZRJOZ': 0.0001666944490748458, 'BTONpFhqrKffRBubEBdZjrlJluwTnEstgPDwAJAAFetBUlZuTdcyVHMrssJSVrZjQbNwgrecTxZySzUZoCNUTZOiTTu': 0.0001666944490748458, 'JRdDbRoTtHFgnqjDChlPGBlDZRQezLwkNGPCzVBpoShnQipqlN': 0.0001666944490748458, 'HqlOsMliqRmrPWPIhUURpgImEDOpJUAiKJBWsAwAdqG': 0.0001666944490748458, 'DmkjCBCodUAoykZixYKNSlCGkKSBAaSfoVSVKQpOrqLsDXunS': 0.0001666944490748458, 'urrYbpSpxIHjkBtSWDqZOBaOZXqOKwKEqlBlbOBlOnrEQxvfAcQfsxTRgJibLo': 0.0001666944490748458, 'VbyturcviPXcgMvRMYIWWOgQXyyDJeMNxCJqlvyIwzhibRkPjzXlgBbGAOSystlJejvBTzLjLyrjVagVnvaoxNLJAgqfRNcVGRRaTZAJyCaXKWb': 0.0001666944490748458, 'izbsDgrcugNylEYwvDsamptJiZNJtCSzbHsmUcejPZXnFsrJEDAmPHXgrwDgjHqRNDnPmaFGNTvkfTtnMZW': 0.0001666944490748458, 'yKkPMHEidnnhVYaxgxBMJaPrPTqgSKDOUOawIuzrrJFFMIlhZAMpOBJKLkalrAPsbBmIffP': 0.0001666944490748458, 'XfDRxzfkWS': 0.0001666944490748458, 'jggJyoflVMgdObMQddFhSbWxOwmpytuGBRJYjQpMFAlFmrkTZlaycnmapCFcwsGOTncsMJYKGePjFdCikcvArZIsZBRkzTvYEPtLKyCyQHIOuwD': 0.0001666944490748458, 'WFcFqpHLBvnOmeBKXcyKUZjjAlvBBnTWMQTVnzjPY': 0.0001666944490748458, 'SOFWAPPtfdJQLqttOhcgYOsnzyCVLQrbXGmuQvAMSedhbDufnQHXnYMVXBnpdXraoRgVMuqdlhXHsmCDYqSrDOzvWXNraFwkbEbngPMYupcEs': 0.0001666944490748458, 'fbZogPUzGiMafIYmcGDUaIeMHRoLnZBACagDJBuZGJcUutzYkLfeCrgrkKBTE': 0.0001666944490748458, 'wJDBvQpScUQwpRbHtECQIwIXcgzBduqPIQQSzaHYErQTjfCuJWrQPmNRI': 0.0001666944490748458, 'JWMybkDTEGtpyJpGu': 0.0001666944490748458, 'wWapnoDhtKUnQVLinNTlmZe': 0.0001666944490748458, 'cYOKgUaEGZAsCLKcLerKzPIaPLM': 0.0001666944490748458, 'AnlIqqnkhxUEiomqCeTozypUxzIxlvLcp': 0.0001666944490748458, 'jAwocpsgCXhQYZQNWnERMkROucRGWsThQrIuxWpSFmiiqRDrUelRmWMCUKAtteOztDfTel': 0.0001666944490748458, 'gSprLeEzlgaCrkCZWUbOlzicDTPOntgdropaQJOTgSlcgXWWLUHoKnJXEhFyvypkWexfnqDUbjorPjmOlkpuJBZBFQDYeVHHGxZIqMQanobOMSDzVlW': 0.0001666944490748458, 'MSjXYaeLhSYPeBqCYvisMEAlJmxKxuCSG': 0.0001666944490748458, 'nvVfTfIHJGjzvKNyfgfZSuqFhdLmxryyEHUfCePvXPuDEnVFHINBGjFJkvyZSxWbYpqCPvHnkCYKVbTvSNOGChGXUMEKmXZRKGjBBpikRHe': 0.0001666944490748458, 'GLewZzktcKQeRRWAhxZWqGCQgQLEfadytThTvVSAdbPmZpVpJbhUEjUImUjYTtTDHPLVoCzhcwNGKGvPQtFpGUbWTxHTrHvNkHEifratjDudLIhsitU': 0.0001666944490748458, 'fyuqTnKRaTJAdlaCRUPdbPPkbfAngOhgMjbcJrOpArCLJdUyyoaWHenEyssBNOYeNgwTUhYFUnOmsIQblWUxhmSXmhzOFeYBZwd': 0.0001666944490748458, 'eTSuFCDCkfjPPyiGCadowdpDKHyEEiHkRHDmF': 0.0001666944490748458, 'LeyarcyIoovKpvNCwXotVPcysqrbsCjVCaaHhkPktuxrjwueVyFDeUmYYfgSvmhjpmWqpQMIzfZXZKtcQrCmaDuXvzHSXgvfNfGTWpKEEbkwFpTaugAAjc': 
0.0001666944490748458, 'YaCkIyqGQFluQkVbzorYlBSRdu': 0.0001666944490748458, 'hBdZUGxmsjFypOavoBsxTPy': 0.0001666944490748458, 'qtSwDcuuhKqwChMlxxsuluDwUzZDkLiwavFqkvkJvfzzBxaDA': 0.0001666944490748458, 'JbJZvjKZqZqyJjFwOaYeafSiNMTMyIcwNtGdfNzoOzigsDLgLYJRtvUczUpBBtFzjbKHDbPRGqWPwOnbNjOvDsDavDtMJAphTUdUuGz': 0.0001666944490748458, 'qaIZllmUSsfxXhPTjEaPjKGHoIbPUoGMypZVoqgQiBUPBaYEuclUgtlBKOAQQkUvFlLROzKPzcIrKoLAAGoqOBtrnLE': 0.0001666944490748458, 'YSdzRMPJwoYagKZsvAXcocLEValZLLqtSdXRMaORG': 0.0001666944490748458, 'cABLIUUwnRWhFSyvGeKXO': 0.0001666944490748458, 'MMvuLBrdUFwCgWmVsRptScGaTaFpPUUTDibXcxYBzSOjlpzswa': 0.0001666944490748458, 'rbmdNjrjQRESJCxkuugwbfjfAMuEEZGfMIvzvxEqnsfInGFBkwibGrXWHUgJSwXjBbdcYIzLNoDwvdqWZeN': 0.0001666944490748458, 'CNQJFPEcgvVOTuOITMZHEMwnNMzuCQOCviZOXXVOwsiEhtRhLiRfesKNYLGrFmIEssOfJpzbZJICaKQXPXDyvoZSdQl': 0.0001666944490748458, 'NGavebQRqYyDtvAucAiZDI': 0.0001666944490748458, 'rFCdWHndqXTTOYoopNVoJdhyXITdXFSeSzxhMfyWMkBoMFdXqIGgxVzBdKBEOynokcLhzmfDgJeJACnuc': 0.0001666944490748458, 'gHGMnAmSxjeDgttfOXzJwbR': 0.0001666944490748458, 'uJBbmzWprycoBgMKbuzlelmjRQptHQQiMnYQSYsEQHtfLRZfKjIiJUxHJhTiZSkqlNGMDSbOeKmMtgAfombbESK': 0.0001666944490748458, 'XYMvlAUxLUawjkmarvflxZPpasNrZJkVzIjyrizFiPrddbCHNVSyglhCFKQKi': 0.0001666944490748458, 'TQUpEmOtzMicChZ': 0.0001666944490748458, 'wTBBsfEFHjUyiZGFAOtTuKAjDLjXfdJpAp': 0.0001666944490748458, 'KCfGilMDTvKjsZcLRxXjzaoSFSwFXeHNKJgPlUGMsCPrqxDCWpWdCQZqRHUcBqStuBQrfZbHAQhpkDzuoUFhmIvulDuEPYrGwAMqQQWYmfxOIfITuQm': 0.0001666944490748458, 'FUuEippocCAiuawqjxScSohYsxZIrxXkQPvAIxetoscdtvBUPpHIdYtptXibevnovIaiFQKkZSbGrCbjToTcGrYrbWKIayvSfFkzMFExhohUmvh': 0.0001666944490748458, 'NPtlHseAIwXyLfrodQZipqhLTc': 0.0001666944490748458, 'PvxTRHVlEPjFhbSCiKbwzwyDmqLknWo': 0.0001666944490748458, 'vkXcNPMBuckAinBenNEfuIThMMeWQaJZfsQFoWjQM': 0.0001666944490748458, 'aeblREtGQpezrHEaiETYOIzQbtBsBqaKk': 0.0001666944490748458, 'mErOXknrWFVGlBBYgkZSdeUgaUYJurUMPxGeTMRFixdtuhCCjMCYLSjioe': 0.0001666944490748458, 'ZUiykPvAdmcaWHZkBolyoheDKsjtRBPehhTlrPHGVOjXEsbfwJJXCeuIILSMHeTmsXnTtViyVfOKSpmDdJutmcqGRGjHVu': 0.0001666944490748458, 'JOUlikwbFjUVTmEspSiUmPrRwSduvGlxJrirEyoFuvsFrXzsadLhfWNimJEsmrIEykQaXTiwVthmoOwlLwBUMYbxJmyrGheHPJlQG': 0.0001666944490748458, 'lE': 0.0001666944490748458, 'LRMDymASEbXJyOFfGonuijqyLuHGBsrrZ': 0.0001666944490748458, 'nqsthvEpYkfxDNXNBJmKqkLsTeUrrWpJCAodOAMQp': 0.0001666944490748458, 'XSLNESTSHswtHRoLYXddvzpJQJxASqDExNguELGeRrweIoZFLsEXJansMGDIPgYzEEe': 0.0001666944490748458, 'SpbCExKsywQxTJgYhCpyJvahHwmeViHjKwypV': 0.0001666944490748458, 'abRAxaBWwCwdRNNJRmUbFOfkCbmJcRJGTQbGnJosQUEsdjJAmVCPmMprCmmqNtZvlrjQVWZVJBlKNqwXdlytjtilyoHqJKqPQhqPYnYkpenorafzoV': 0.0001666944490748458, 'fRqeQEmEkp': 0.0001666944490748458, 'bEqEtCgUPuRNFurmYVjwgnxvnypmHQbEOigYNZVBcjOTTbhOPuhMtZFkkhEGzjQIFADaJyBANSPqGvblRzM': 0.0001666944490748458, 'BAYuU': 0.0001666944490748458, 'SZekaHkUuIqrjs': 0.0001666944490748458, 'MhyzuMoroZSnFdRYbiWnWEZRdARORycVeSbaPBiSzpsXKWImwVschpAKYFdaGSLpMLMntOgefAgMjHXzoyztuk': 0.0001666944490748458, 'nIyyFGBvQQfbGuttEITNKJdpxPZtaEhvs': 0.0001666944490748458, 'obiKORtYWIyYrJeXaXjmqXlqSAUftqINAIzxIpBoJyjZxnIRvoWwIMCBJzdBPoclyrbpAQRrZmrUXekJIwcaVugaWZO': 0.0001666944490748458, 'KpWLsXArvynPkFvJJNMMNPDOOjIxfWrUVsXhonULHTDZZZAPcRifVdeyEhwcwfUCaEBUwwoOLKXdeVBkzGDhGDTffgXNDdbEcSQBGpqEBgTHSTo': 0.0001666944490748458, 'twpLmcUxtStHRlMIVlCHgnQcOGMeqkMaCUgMbomvBggdvVmnokmqPYkOVqvIvYJVLTTVHwQhMehEbKgmRrGqwdLkPdXSAJjuvyNkm': 0.0001666944490748458, 
'SugvBQnBZwIkHVbzLskWCRDkZbyFiRpNxcmCzfyDhAPKkiKlqtaFMKekiSEfrVlCthWNPcgmXqQheCBTRRdtxPqYQ': 0.0001666944490748458, 'RsvYGIjjpBKVEqeZWyJTHnKdrGhjZYrKSNxfgSiiEVzCqPQUFFuySTnMSOLQUAHojLVHNHBokVFaaPGNBDBJbIqHRBpMwQwTZpJAnmJFYLZigOQspSTsB': 0.0001666944490748458, 'NKWMQmEhghUplqMAwStWXGPIEELNKXuwFFnBZvLRNUjNkqbHgXj': 0.0001666944490748458, 'qGaPHyWHzODHnAuloyyJBvghOzKwQfSWYUiSUnuQHDOJtwQOvOltRiazjROARKfVJTNcRNxOjVbZtOUitFfBymmFOKIiVmAVUGPZxk': 0.0001666944490748458, 'UUuOKFqEuLHhOWWSxaJKWGkOvnFeEU': 0.0001666944490748458, 'ZWlNAuATvKugvpANzjzScCmDtvOyAPoOPIsNsSmGyLSvEHOQsCrzNAawbLbAmiJKmEtExhjScPQBZNBQyDvpWczhvyibMMdaQtsJVOdiNE': 0.0001666944490748458, 'pUtWjvcgjbVCzCuAZbEzHuP': 0.0001666944490748458, 'UjunRCIoBinLlujgAIonNfFOi': 0.0001666944490748458, 'itgECSiVkvdfNtxHBxNveVnsmvfHDAUCHzfsUWvbfQJHLNUkivYRfKCoCiDrGFzjNfRicIsWQPcHvniYhzASBMxyHbuQdsLQQsuWXTN': 0.0001666944490748458, 'hRqWSBWtCDBAzTDBexPMWVlVbsjYJfAdZLxtXFGrLbKEDYBxGtDapjEsXHyycEVoaqEsSqMiRHmoexAzIzqzjxJCpjcqKfYjQvILRMxcKBt': 0.0001666944490748458, 'AEERzrjafvTYACvgcLZaztxFNoiEskJHjPCIXvvxKlBEQhuQCtQWV': 0.0001666944490748458, 'kqkgwyBVtygMRzZjDWKjMOUSTdyCjeZPAMtTzxbfVcQYaCMHfaHgNgXKfOcUgXyXptEjruOCmUmiPydZuTnONoJhnQcaC': 0.0001666944490748458, 'IMiXtTGyifunJiyobvyaHSqrcdXhTxrjErwKKwSNCSlNDXNHosbADtZEfLqgcUvrncoLoyozEJKqHcEXmVejPtwQypkmiyHunU': 0.0001666944490748458, 'cBJhwLuNEuchKcpsDhoYLaQSmIFhzyqKWBOyABDFe': 0.0001666944490748458, 'JEhReGrwDYYLGvxgLahIDMRIdhntfRFxaUfJridzoWnIPNbRozHtoFhKsELOKyzsrcdIYnNtgjGRzeQETvwZvqQgbld': 0.0001666944490748458, 'KwVChEMdjcyVLPGnJynwhwSooZKaSlWpUVNpiGdOggYYEQNteqeWxEEdcUf': 0.0001666944490748458, 'EOMEvyzkTADbkvz': 0.0001666944490748458, 'TDSpoeZwzvQEEYRRXue': 0.0001666944490748458, 'BvuRaubcGGstTRlkyhEoJhgZXJifVPynuLgoEoWiTbHQjfFqsdVTsTfsrUVKlLJriLbJwJesvazkfLoKttxLnggepOYPgMDkds': 0.0001666944490748458, 'mIAKXeilKsxpGelWrIimAiqqfnKEXrjsifeqsiEWIqXEHGglVLEQuF': 0.0001666944490748458, 'jRLIRG': 0.0001666944490748458, 'GFccHPImdMOgyYAKMIZdhKKSJhadHCCgPKrhFtMZDRJWtyUXWNBsRawjxZtXXqTYitVbwG': 0.0001666944490748458, 'CkVfceUHauONpmXXBHkvLnNMMTYKYLevpyrARMqYIAIiUesQwtizIYafrFVRLRCLBjGQCObMMTcaAIbIeVphwhreSHnpaXAzps': 0.0001666944490748458, 'tgiAoplKfcBHtLjLvtpOfGPBifUbDFGtwVyyYaiXGAJEzMZRYYQIqWlwlxQDDcrAYKtDEkkXkdLHSwWIzySEYIfVShMGLBwznmcmemqXICyLaUe': 0.0001666944490748458, 'gCnECGOpxcYcqyqfzQzrJNmKbdcBVgObZfgcowVAMqbCieJdyxMwWuoQNVialsmpUZx': 0.0001666944490748458, 'GSRWKSadoizkYPOQrygrzkEUVSUvseQvLGIQKWGtELlLPAFDQARanuBWPLXeGIRfrpIxdsKhHaZnNVsoKIRtZLnMtQtlkfqQCrLfJJi': 0.0001666944490748458, 'ukMjFQteBFWrXLiQFkoDBpShupoLlBewmRaGSaVkOdupqaRDcbBuJedPkFcMCFRNYeVuYbJKTOFAoQOuayOwlzdIgpsSVDcVeAhmkgoWJYGKScCbvUrPKk': 0.0001666944490748458, 'zAHjMDCxLPpnrfcHrRPrREiREnyGBREEskoapGmHOhxmWHiuSwntKETHSqKPpQAXCJneCBNkfwqUVxcYcAlJibFXpSjEGMWBzbnoRgvHyOqpOLrqQv': 0.0001666944490748458, 'WslOzejeGasOTcsJrLIOTKcOAcQtAbRpNnFclywrvWTpGJokDAJxAqJuWatFIOiXOpBJoBFOkNkGzLNunvSDNJOVEzDYqEfJXPZSoGkvxDBNukaOqVrNmOK': 0.0001666944490748458, 'AhaiDBXAWtQRFgVkNiqKgnoWTjwItxzMIUQvaqSELUiwXDBWmtHGpWeYkKcvsM': 0.0001666944490748458, 'pcJoSRmeNSHwweOKXnLSfoDzviInUkIxuCq': 0.0001666944490748458, 'sukBXjEUexfQDIPTYZYjxgLqfgVNJAhpxziukwzqjqEaYjzufYzPnZzemWtnDAMBoTdZDdUDkj': 0.0001666944490748458, 'KpsQfkINwJgmWfKOLxcmppuHxXKPXyzjOzrXbcbVbLlnozcdYzaFnSjECNqyFlQiXBukhGWcjOrpr': 0.0001666944490748458, 'mRD': 0.0001666944490748458, 'xArbItBHobszPOYPhwrHrEWGRcOgiRQejxrtXTFWxPxLLGmAX': 0.0001666944490748458, 'hupziVeZFDfZeKQuDCrYNILtOfWxGNX': 0.0001666944490748458, 'RM': 0.0001666944490748458, 
'KUQaBjSBhBZxcedcXozEPYsNMsoKjNyViYnrYwkHtXdYSdLIkMeHJLcgZhWJWBvSnSNZMlpJDEYOjZmSFWNkiIP': 0.0001666944490748458, 'HEOdmRetdjscLKUEPhCZpJrQRnUmicKZWbNKJJXSbEkcIvmWMXpUAqrooglmXrMtlssPzVXGdtbpShJLmshMcpTnkKYqmYHUVREdXBAFOWgBkl': 0.0001666944490748458, 'MeDnSATTUAUceLZlEcjqkuKGWTvpvlERNLiaqzTkGZtRyVmzzZBmHrXVROiYNKabjWXCFNzMItxFIBbPsyvJVKQQOUiDq': 0.0001666944490748458, 'txGJSLoTKftTprTagNspIAIxpCmVOYtdtmvdOdfDVYlqzGszL': 0.0001666944490748458, 'jVEwoHVRdZmhberXndFIfx': 0.0001666944490748458, 'WTTByYpRAsKzvwvsakyTdhmLdlcgadQYqh': 0.0001666944490748458, 'iShGNKrrYoGnrQHwPjStvXlzujulIKMNykczVqwnt': 0.0001666944490748458, 'yttuAbcekuOzrFJWTYwGNvSHCZoHvEyJQgCvyzvTgXoePPOFbVBtPZbuSebCYPiOlBGieYPrcashgRsFrmlniZYSJjbNAuZuXWAhhicZmC': 0.0001666944490748458, 'kZUIulChzHkKqjmdDsdzUCcdEYVWdNSzKDewmEzDgizHflJiRVNOkz': 0.0001666944490748458, 'QJsOBNueyRvdADnPSFRaPMUhaeTrmEoCkowpsHHTExXziXHUrEoKCqtIvbABznjkxRhJdFstlJZVzTDDH': 0.0001666944490748458, 'npaytQjxexDhJALCRevgJSdLqQvmxQolbKjabJxhKZcLKsEDiAjOhaOadIWtkhdWstTNkqnCVnsQsIIqnDsZZHqLMIUvGevDnWnlmffVYu': 0.0001666944490748458, 'bCesgayxxjFdiqwHsgttuIWZyaOfWnBuksgEYKYFUmjLlgqELmbPVSiztezvZKozlbOIYLkmHniUvYQmT': 0.0001666944490748458, 'nLCizFdYgEjacsiesJCMpznwjCIklZjQjptMaDOCBrcbAnZaWEfqXbusnJs': 0.0001666944490748458, 'DexmPGxVAAHHasccCEhATpznrzNKQtUMhOSeLipQDAzLWCgZyBFQqYnxiYMdbEDtsxzfFIaDKwDuCAMDeRKmHtKfNSSNpPxXbSZWCDWUBFWXo': 0.0001666944490748458, 'lDgGUUDCeLekYoTUFJoNfkEjkrteYBpnPZfGYPhzJGu': 0.0001666944490748458, 'yXagUsjZhqTUdEqseTIawLAnqOiBF': 0.0001666944490748458, 'DXonpOZayqEPonihuUOMdSXRBkerxSbVEgmSKRiPsDOuvDpldxjnnSRwBbhXdSpmBrApSluiYlBdxFXChcdMBQQjas': 0.0001666944490748458, 'AiPaNgwxqWbyCUDpWYqdwonsiPQ': 0.0001666944490748458, 'GXljiybEdcXkXvYBOPRIgbwzaRSdKovMVPOAmpMIPwSwEOxefWiXrOKUDYUcBbiAcaHWznpTCIRBrT': 0.0001666944490748458, 'qpNkJaPajFbATeD': 0.0001666944490748458, 'EHSsIVVdtRQuqbRkCcfEHuNTRoJJzbgHXUTjIhtxxINgkDLaKzIrqLmgmHrNJgczFHePgqPkdbZWqfurrBgrYy': 0.0001666944490748458, 'PaNqFMOlZuuIaZCtgcWBIQzVDHAyeFtIgIAAMYLGuPbZAhClqZhyCfChhpvWmBETWnNBwVCsHhBvyKvtveSxLUEkGAslxjiDHKGbBnqog': 0.0001666944490748458, 'wMfktnZIrgGuBOQVNdDhITLcZrLiWGWEPXtBIzdrwIHhZW': 0.0001666944490748458, 'QlIrRyxAXDVDmrQjYNCtoaOoeptDTZgoBtypyDCZMInabrwKYsoTbypJBbjQLwjjkfdiuikWFuXtoTRxSuecvldaUqiMdujhTIBgGZIkZvnPsTjkkvQKtD': 0.0001666944490748458, 'VgOxePHbEMeiBjWIXgOEEWtMohCmzAfclUDyMQTXIkFycdcGmbCoekbovXeTOLtQErlCQjhYMJbznwVRomZCOGslJebjaSFVDdJBflTGaAbWwae': 0.0001666944490748458, 'kBTHoFtkegXWqVXSwWdJIgjoGlKPbfOjDH': 0.0001666944490748458, 'WUAJlGkPwifadfQMbQDVQxnJqiPtsMaEuaZvCjQZkkMmXqcUsNWruHHvmdTRHFphcWtqanvzqEFpaClAevjVCdTGRfUWByPAuWayfYgYdIoExgSMvyjoC': 0.0001666944490748458, 'dAlqBBNkpwteGqGChKIOvwZuIfOIWuBbZBpOpIGXOQ': 0.0001666944490748458, 'OsmgRIiwtXvHsoUtrGMkSILLZTgWpPxbCdqnbOpnIthkApBiPtPgkTwLciOdRBKHJNUUOj': 0.0001666944490748458, 'oqyxlqtYHnZQvqM': 0.0001666944490748458, 'CEMBQJZubYskljzqljvfVvRjUHNzSjLPdAJCyHjeTJPTQGHfnHicunsDNyTcOjDjyzhoOgwXmVUKHKUVITKjURLjnDDGdSq': 0.0001666944490748458, 'EQHzdQmlipFGjSrUFRZlrGTVhjURntWvLhSvBKocSawtfzBQNHXMFjkPaNiSrAG': 0.0001666944490748458, 'InHqqlzBVdBxCzzaYrsLWyyZiLQtGWLLaDWqbFLnYdXAxQNrBhlLvPShYxqjBWGxrjXIYzKDhIQebCLMfqNiEncWu': 0.0001666944490748458, 'eKXuaMuZZKeaQfZJhngPjbncArkDfNK': 0.0001666944490748458, 'RAuDNITIQSMNKWulYYyqWzDNEHInoYMlAAIBmIjWlXDHqyqiAxKJNhRbGHdFKqwuOIKDOhMLpIYOxuLILawmI': 0.0001666944490748458, 'hFyOtdZWJbqOkajAt': 0.0001666944490748458, 'dALURIuqxemaoSWTyfOxNAKchqcPbcblwzvMy': 0.0001666944490748458, 
'PhTRNalnIEAlApgSssoaJoRScwwlnZffXjzlUAnBeQunfVrNCXFPrDrkwyPftrKfJmVHfzLkEFtlPsvIOsmvhhaQAvN': 0.0001666944490748458, 'fhtmRDrJdGnVzghUxiXtAaVmszpcNNdBZfKsioAEBXHRljktBguBmPpXDlSqIWMnytYACXHtMRinvXHelfJFnRKIQWwTGIGldxKMJraLqWJJEpS': 0.0001666944490748458, 'ctnqyCtRNkrCcgmzwpxluUFzWJqnytqxMeKyeVlRGukHjerxBMGoIwUPozUeQDVGLjkrCKEwewOcQCJdNwfiEPNlEAHtjmkYjdnOA': 0.0001666944490748458, 'NDZdLnhBYCkbxQHrj': 0.0001666944490748458, 'mEEhxUjSek': 0.0001666944490748458, 'EgDvEJaZVYpsnhUdnlmAkEVhkh': 0.0001666944490748458, 'ukHrVNasuKhoLVbHZOVnhLTOmaZfdBbIUymDJQOCIweFvOXNXUNOlHgGPbPawD': 0.0001666944490748458, 'RowYtJYyzyisRVwpYcHdwHaaoZaKvfpXhLWraiNdSeeIDzAPLKJHdRVMLiyqpetKfvEPRVkXAXAJyPXpZLPaaWtInIojIuoejfmaRtZFAynrHEHlnsJaZAd': 0.0001666944490748458, 'RqFaUQalXVGoCIAOlAbPVSRMtmBynUbLSqwqfuicBhEopeTkHXQ': 0.0001666944490748458, 'GJfJGdexoGPYIobFsLqQWvrexLPLaZOWfNlFUmxkSqQQggUXpTFRxUHvbEoIrunyNBjuftqTMacUKpGBIIuMn': 0.0001666944490748458, 'EIMRQNbpUVZFLUTRiCZyYNCedfmpOWWokDqGQJlDlXfHXGaAqMbVbQPTBBvVlRQOnJIoqrUDZKEoPgBVcilZudyMkwDRtumQepeYiOrFnHjpseOEgLHHpo': 0.0001666944490748458, 'LbkQEvPQHtLnbGLGtewgomctvBofIUTBshiQjziruVBVLNLtXomQxiRxmscjDiZxLQPhSIkKplDSvAF': 0.0001666944490748458, 'MalBzDabebPovsndJWMliBljuvtPXoqWHQgUQNxKjCtKSBHIxnuSQCwUbRlctJeWxQzaFyClSEwnzeEsp': 0.0001666944490748458, 'fjfBnRutCxqVnIWuZgYKShnTuGKjNamzXIWbKShHDdiMbALhIkJVRoofvRJwqteNQaHeNUvLCdzyGfW': 0.0001666944490748458, 'dTOhylZhysxlSjkBZecJvWLAsqtpAUBuImADsaQDBtVQXuYUVOQQMplINuefYpUZdScjPyhGQKcXLzAzwGSEUARNizreQhRCxaryTHtsdVNCAoX': 0.0001666944490748458, 'aYRKIINMcJzlDysdW': 0.0001666944490748458, 'gEavwRCASmIqCWzANpuGZasaKBxlCZ': 0.0001666944490748458, 'lTjWGawoboexhDHIVNKkXNriLRrWNRGpVrFNMYkkHpLWyAJKOUUlEiocupyhqKYfcWInvyZKglQrDzgTLfSTVMZdEdikUKESbjaMHMncsisyE': 0.0001666944490748458, 'RTGBlphjBIgLcsfiGQMJiQzgYdbgKHDdrzBBujKDNHzyGSzcfedYFcdSreGnkGZSaHlEXbacMSUyAT': 0.0001666944490748458, 'SmuBNNUaZitivSepayMdyWyABZPkwyysaUUizkavrUpEmAEzTchPZkoDozuBiAr': 0.0001666944490748458, 'uyEzSbowh': 0.0001666944490748458, 'yNSlUTzQPyYiwbhfTxMVxlsNZR': 0.0001666944490748458, 'eruWmEBHFUtVAhiuUofXZVevnPuHazLlslnQtqvHABtIoHboRKkKUwnARAIzYKONtoJCLXCeRFDZPkijIFPDTR': 0.0001666944490748458, 'rCedhiMLDbvtLVbiJShgDdWwjGOlBYMfjywWQmNWSzlaryngwChZEyMOItvcqbhoaHKCOOCgnyDwTyRRuvVLc': 0.0001666944490748458, 'QkKVosPJcwRasMNdtQQNIIfujyWWGAVNujHKxAEEOlIlqVgEWUybjxLlsUxFQqFsNYFfnaxGiTCyQlXLGMXebQONqHBcT': 0.0001666944490748458, 'qNBEnmjlGnbNRUeDVCSeLAXvWBxiVhivRtyLQlkvHeL': 0.0001666944490748458, 'vLCTJPunOpCflArEzXCZjIlbZkGdOAlibzxaGRpGUYhDJDlAwTmsUFbfL': 0.0001666944490748458, 'YQshgohULLMfLRehVp': 0.0001666944490748458, 'WcwzvdXTcqxYDQvxagsPBYdeX': 0.0001666944490748458, 'CajtXwVBtLHXEmW': 0.0001666944490748458, 'dhdoSpnCv': 0.0001666944490748458, 'LdLvjstzWJMJOkGvFJDKqZWANdFGUkHlorttcHXGhLzNBFNHDZAKXhWpRouXnZzlAmcOJdcdRlEAsoqlsEZShQbxFwcVfLa': 0.0001666944490748458, 'SSSGydePpxPpobQuTkQ': 0.0001666944490748458, 'OtZMDNcRyMCEcjZLcGugiqzLcImHnIbnMEZWrGEetCkvjCqUyRCppRAETTILnvbcrqYwTXGQyKRloPDFYLnzqZOirHauVxgHpbZ': 0.0001666944490748458, 'HLxnounTZTasYNtaeCADSDVJgdXiOgwIpolZnHJcHNLfUFrQyCyAMhizhzKJIbDtTHOcFCkYLyteWczawbpBmDjdrLvxirqmeAkKGJkKL': 0.0001666944490748458, 'pKHaXGRqkO': 0.0001666944490748458, 'OvkQTrUburarYMVqLHRqfogCxsJDzxfpYkrHMSbiQheazONcKDPZvLZhgWVLUdeijHQVMflEAcQRsomcjBvfujkiYdXGhqbQRAqHDPwYaEAqUSTRFe': 0.0001666944490748458, 'zghSVdyVLBNhfKbzketrGQypyxRSRGVmdsRIjXXEQxzseJheloftHbrWqRYeeTXRjqwlZu': 0.0001666944490748458, 'CvwcBelOfzIcKCjWGohvkZQeTFKZApFBeIWgor': 0.0001666944490748458, 'HfXLISNOspjfYXhJcgvuzyuZuJe': 
0.0001666944490748458, 'eUjWdVWUEYqOXiFBpdjRaTMiJSGGUSoGYgJfnkWTyoMaRahCdhphYEhJnIdGLbxxKZXadcYsOmwillwgjuv': 0.0001666944490748458, 'jHXhUwHbcPBusPytHRfdzkubSGqzEmudEvscROpLWmTFjpV': 0.0001666944490748458, 'gDvgvlGjPYGgNXxTgirSTMkNIaDJSGlIPKmNMuDdneyCJLAtN': 0.0001666944490748458, 'ZCuSdGnBtAderRRKunwpu': 0.0001666944490748458, 'zswyVVbDftWmaEIAPL': 0.0001666944490748458, 'ZGzwNMYPlbyuugwEHMU': 0.0001666944490748458, 'fQZAhovFbVnqFibcOvfGdmwPRSeZxkWJBVYleRoriJDZNFVXgCauxeW': 0.0001666944490748458, 'bwYlsaKTCBolKRIRzfhyIRM': 0.0001666944490748458, 'IRPlOiNZnfOgvwSFytOlEExCTqaUBcZjyFYRPUfMoMUAZaSVSasTDHHHBBteOlzaueRTwI': 0.0001666944490748458, 'FaGDAfLmjJWiSjMIgYInVLXEXHFHibkknqPfgQiuxkwhJJK': 0.0001666944490748458, 'ccOLdDmtBChIZDgMMwareyfPnuyuqR': 0.0001666944490748458, 'SGROSNcaljgxRdfNjPXlMHPxgFGuZicRclfAAcuHkvMKudbGFKzREbyQgqrEIjNNCEUOXbvpwBjlLrpOYUZGLtONdelxjStRPHI': 0.0001666944490748458, 'qfKcOcdvRSOiqnQKjSukaPrHnPtorcLHtiFOApVPKVnkXYFJsCCCBNIRzAtIPpRoEwrKLMdMNvzZMtnYRIUHGOixkLhshiUQlEXkijwVV': 0.0001666944490748458, 'oDHnTcmzYmWIDixOqNsYkALKzOGitnCWCmqYNbbPyMT': 0.0001666944490748458, 'XMCaXqiKZSQhOPZdKRrLnvYjWMAwLzuqmfJgSKCxxajZeeqvxqffijWSRBvlyzHHYBHdyrBybjEGXbrNTsFeGBLxYzyptoKEbMYpDhVJCcOjIejyYnlKrd': 0.0001666944490748458, 'UIiYJHzOHwBRfVnPzmUCSMbXwNjpABPIevjPWcCwakxotFCnwA': 0.0001666944490748458, 'ylUVewHEGJdpVHuEWNwkGOzjc': 0.0001666944490748458, 'n': 0.0001666944490748458, 'rsWmtalygracqOPuHSQjaqibxJYGTAWtvyuEPtVeDzdXxa': 0.0001666944490748458, 'XhPuEHPXIpEJnEfbbXMSvcuzgBTSnpdLwReEMvVYyvyhCAmaBTmOSmppxSkTtilwUpbnH': 0.0001666944490748458, 'KZTtcfELjmWRQUvqAMgGgVacADtSajvSiCUgOPHtA': 0.0001666944490748458, 'rNWJMEVXjIPOOHdrwcVBaw': 0.0001666944490748458, 'LeDQvFxtfRdDlqDgsOBNsfKCMUyhTBFHhbzFRevsLmuFQhpaIeiEuqvPIpjXLAgxbWmyazyMpxMpSQbjeYnIvONumvZDndMcnjUwqzjaA': 0.0001666944490748458, 'CLKylTfWFTeEPIhMroOjpxbRNQqZREObBabAMSYqhoWlMsgCnQFjxRkAvvsTnUJRqXlpvfhWptLuJOkrcvPQKAyYaqSdCLzKigN': 0.0001666944490748458, 'TqPKsHUyUCKAPseIPWrQoSGtqhejjPDtdhaIyMrOZJpKlCUtGFULrlzabrVHOfM': 0.0001666944490748458, 'ieTcTTCGKSrzwUlPhKsSUYXKtxTbswZxaOPUBSnbRpcPmALssMgaBEXSddWQcswrurAWDhznvKbWvQGhcjjMotKvEJDro': 0.0001666944490748458, 'dfzHFYELS': 0.0001666944490748458, 'hHqVHiwiPPUgyBQaBEkCBVCVrEgvcdxXBR': 0.0001666944490748458, 'ohlJOaxwHZVcbWBZCYmNJQHwKALZBurSlanGDsqrixsebGYuFnIWEWXvcvwRnmIlmOymAvSknvGsj': 0.0001666944490748458, 'lFvSfKoPeFlNgVs': 0.0001666944490748458, 'PKgNbuyZURLTSVAHBeIaNdnMZigJHztfhSI': 0.0001666944490748458, 'WPXbmGGdWBfBfABIbfgIiexUSpLONacUzQbIaOnImUdYhtHaHEImKktLhVpwtbiJwodOPiTEfccebdfGykQjaNMSY': 0.0001666944490748458, 'VmQtLCpzOXarhYQBWEtkkyvLzUSVGqxmHMwdJUCmcBXDfWHCMnrPyffUNoiQKrHTlUNpEMVAILdurRQBoXhYHX': 0.0001666944490748458, 'mTiffRckwcMld': 0.0001666944490748458, 'ZeWQphkiAkodrGOEHtuxdZQvftSycSlcYtqakrVJaAahTqTNEKEZWiomsqUfMKeIIrJcbUVqXJRxuGoTeAkZrZV': 0.0001666944490748458, 'wrxqtfrMwiPEKMmgUgiHQbWMDWvRzTbJzNTflYWhDwAHZsHJcBksElXJwHDoRSDRkrASQ': 0.0001666944490748458, 'LqsMmgBgTFlCAYSAzeqgCVhiUNyjrmlJMFNOGnwNNIByINQOcStbkcDeZGsqJozogLdJMJlJvXUCelXkrgwbzHeayGqfTrnZqke': 0.0001666944490748458, 'VbaUrgmzxKyUpZprmTnWgkThXhgGorSKbnbfVLyGGsZadqlMLlWLhLXcTwzNTxNbdvpvmAoxnLIPjhokbPVHbkTsLcODmITQJFpOVqmcgv': 0.0001666944490748458, 'OHIVKJjeCXidQgGqqBzRHanJIwaczkKxFruLcjeFcacCwrIWgmTNnniXdFyuQtiKzaWvNHuiUjiGvUEfpZZnJVpnFr': 0.0001666944490748458, 'VkWxTDyIkgPkzuBdObBdATKYtmAEGAHqIfCFPzvMnxkfOEnuVgvoGEAXYnGIzXYUpZuVRfFzUEQHdfDTBmuuArEnu': 0.0001666944490748458, 'mUdhHGKJTiuHbWLNygQoWfkoShJWnRhkYKylrjmTkZMgDThvgMiNtoKWeRIJLoxjRaZOlaQwNvsmlswBnZiCHfZrvwUbBg': 
… [remainder of this diff elided: it adds a very large serialized model-metadata dump for the 'test_one_label_prediction' model, consisting of per-column statistics keyed by machine-generated column names (bucket probabilities, histograms, outlier and quality scores, e.g. for column 'JcWtsFNEUi'), row counts (total_row_count: 5999), the ludwig_save_path, train/update timestamps, uniform column_importances, and a closing 'model_when_conditions': [{}]; the file ends without a trailing newline]
List of available trained models It would be great if we could have an option like mindsdb.getExistingModels: some kind of feature to list all the existing models that I have trained, so that I can easily pick one and proceed with my work.
Yes, we can do this; let's add it to the feature requests. I can guide you on how to do it too. https://github.com/mindsdb/main/issues/new?template=feature-mindsb-request.md Hello, this is not a bug, I have added it as a feature request only; let me know when you can guide me, @torrmal. I could probably help out with this. @AbhishekRaoC, for sure, please feel free to do so. If you add the method to the mindsdb controller it will be fantastic; make sure to use the collections that we have: libs/data_entities/*. Best regards, Jorge
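A minimal sketch of what such a listing helper could look like, assuming models are stored as `<name>_light_model_metadata.pickle` files under `CONFIG.MINDSDB_STORAGE_PATH` (the layout the transaction code in the patches below reads from); the function name and the returned fields here are illustrative, not the API that was eventually merged:

```
import os
import pickle

from mindsdb.config import CONFIG


def get_models(storage_path=CONFIG.MINDSDB_STORAGE_PATH):
    """List trained models by scanning the storage folder for the
    <name>_light_model_metadata.pickle files that learn() leaves behind."""
    suffix = '_light_model_metadata.pickle'
    models = []
    for fname in sorted(os.listdir(storage_path)):
        if fname.endswith(suffix):
            with open(os.path.join(storage_path, fname), 'rb') as fp:
                lmd = pickle.load(fp)  # light model metadata is a plain dict
            models.append({'name': fname[:-len(suffix)],
                           'current_phase': lmd.get('current_phase')})
    return models
```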
2019-04-16T19:01:36Z
[]
[]
mindsdb/mindsdb
169
mindsdb__mindsdb-169
[ "167" ]
2af7453a8ba6a7ace60e49d448fd4915091020db
diff --git a/docs/examples/basic/__init__.py b/docs/examples/basic/__init__.py deleted file mode 100644 diff --git a/docs/examples/basic/predict.py b/docs/examples/basic/predict.py --- a/docs/examples/basic/predict.py +++ b/docs/examples/basic/predict.py @@ -12,4 +12,4 @@ result = Predictor(name='home_rentals_price').predict(when={'number_of_rooms': 2,'number_of_bathrooms':1, 'sqft': 1190}) # you can now print the results -print('The predicted price is ${price} with {conf} confidence'.format(price=result.predicted_values[0]['rental_price'], conf=result.predicted_values[0]['prediction_confidence'])) +print('The predicted price is ${price} with {conf} confidence'.format(price=result[0]['rental_price'], conf=result[0]['rental_price_confidence'])) diff --git a/docs/examples/nlp/predict.py b/docs/examples/nlp/predict.py --- a/docs/examples/nlp/predict.py +++ b/docs/examples/nlp/predict.py @@ -1,13 +1,9 @@ -""" - -""" - from mindsdb import * +mdb = Predictor(name='real_estate_desc') + # Here we use the model to make predictions (NOTE: You need to run train.py first) -result = MindsDB().predict( - predict='number_of_rooms', - model_name='real_estate_desc', +result = mdb.predict( when={ "description": """A true gem rooms: 2 diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -34,8 +34,6 @@ def __init__(self, name, root_folder=CONFIG.MINDSDB_STORAGE_PATH, log_level=CONF self.name = name self.root_folder = root_folder self.uuid = str(uuid.uuid1()) - self.predict_worker = None - # initialize log self.log = MindsdbLogger(log_level=log_level, send_logs=False, log_url=log_server, uuid=self.uuid) @@ -430,21 +428,20 @@ def predict(self, when={}, when_data = None, update_cached_model = False): breakpoint = CONFIG.DEBUG_BREAK_POINT when_ds = None if when_data is None else getDS(when_data) - heavy_transaction_metadata = {} - - heavy_transaction_metadata['name'] = self.name - - if update_cached_model: - self.predict_worker = None # lets turn into lists: when when = [when] if type(when) in [type(None), type({})] else when - heavy_transaction_metadata['when_data'] = when_ds - light_transaction_metadata = {} + heavy_transaction_metadata = {} + if when_ds is None: + heavy_transaction_metadata['when_data'] = None + else: + heavy_transaction_metadata['when_data'] = when_ds + heavy_transaction_metadata['model_when_conditions'] = when + heavy_transaction_metadata['name'] = self.name + light_transaction_metadata = {} light_transaction_metadata['name'] = self.name - light_transaction_metadata['model_when_conditions'] = when light_transaction_metadata['type'] = transaction_type light_transaction_metadata['data_preparation'] = {} diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -155,7 +155,6 @@ def _execute_predict(self): old_hmd = {} for k in self.hmd: old_hmd[k] = self.hmd[k] - with open(CONFIG.MINDSDB_STORAGE_PATH + '/' + self.lmd['name'] + '_light_model_metadata.pickle', 'rb') as fp: self.lmd = pickle.load(fp) @@ -163,10 +162,18 @@ def _execute_predict(self): self.hmd = pickle.load(fp) for k in old_lmd: - if old_lmd[k] is not None: self.lmd[k] = old_lmd[k] + if old_lmd[k] is not None: + self.lmd[k] = old_lmd[k] + else: + if k not in self.lmd: + self.lmd[k] = None for k in old_hmd: - if old_hmd[k] is not None: self.hmd[k] = old_hmd[k] + if old_hmd[k] 
is not None: + self.hmd[k] = old_hmd[k] + else: + if k not in self.hmd: + self.hmd[k] = None if self.lmd is None: self.log.error('No metadata found for this model') diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py --- a/mindsdb/libs/phases/data_extractor/data_extractor.py +++ b/mindsdb/libs/phases/data_extractor/data_extractor.py @@ -21,7 +21,7 @@ def _get_data_frame_from_when_conditions(self): """ columns = self.transaction.lmd['columns'] - when_conditions = self.transaction.lmd['model_when_conditions'] + when_conditions = self.transaction.hmd['model_when_conditions'] when_conditions_list = [] # here we want to make a list of the type ( ValueForField1, ValueForField2,..., ValueForFieldN ), ... @@ -82,7 +82,7 @@ def _get_prepared_input_df(self): df = self.transaction.hmd['when_data'] df = df.where((pandas.notnull(df)), None) - elif self.transaction.lmd['model_when_conditions'] is not None: + elif self.transaction.hmd['model_when_conditions'] is not None: # if no data frame yet, make one df = self._get_data_frame_from_when_conditions()
diff --git a/docs/examples/basic/test.py b/docs/examples/basic/test.py --- a/docs/examples/basic/test.py +++ b/docs/examples/basic/test.py @@ -6,18 +6,14 @@ from_data="home_rentals.csv", # the path to the file where we can learn from, (note: can be url) to_predict='rental_price', # the column we want to learn to predict given all the data in the file - #sample_margin_of_error=0.02, - stop_training_in_x_seconds=160, - stop_training_in_accuracy=0.95 ) #use the model to make predictions result = mdb.predict( when={"number_of_rooms": 2, "sqft": 1100, 'location': 'great', 'days_on_market': 10, "number_of_bathrooms": 1}) -result[0].explain() print(result[0]['rental_price']) -print(result[0]._predicted_values) +print(result[0]) #3306 (5%) #3837 (37%) #3836 (26%) @@ -29,10 +25,9 @@ when = {"sqft": 700} result = mdb.predict( when=when) -result[0].explain() print(result[0]['rental_price']) #2205 #828 #3306 #3076 -#2677 \ No newline at end of file +#2677
AttributeError on running docs/examples/basic/predict.py AttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values' Steps to reproduce the behavior: 1. Go to /docs/examples/basic/ 2. Run init.py, train.py, test.py and predict.py in order. 3. See error. The predicted price for the given house configuration was expected. Traceback: ![mindsdb2](https://user-images.githubusercontent.com/39651310/56485748-ed714f00-64f2-11e9-8f25-35e60197ff6e.png) - OS: Ubuntu 18.04
Yes, those examples are outdated; I was planning to get to them at some point, and will try to fix them today.
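For reference, the calling convention the patch above migrates the docs to: prediction rows are indexed directly, and per-column confidence lives under `<column>_confidence`; there is no `predicted_values` attribute anymore.

```
from mindsdb import Predictor

result = Predictor(name='home_rentals_price').predict(
    when={'number_of_rooms': 2, 'number_of_bathrooms': 1, 'sqft': 1190})

# Index the result rows directly instead of result.predicted_values[...]
print('The predicted price is ${price} with {conf} confidence'.format(
    price=result[0]['rental_price'],
    conf=result[0]['rental_price_confidence']))
```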
2019-04-22T08:35:17Z
[]
[]
mindsdb/mindsdb
180
mindsdb__mindsdb-180
[ "183" ]
eb2f686deef01b505298687e28b6fb30d82634ef
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -6,7 +6,7 @@ from scipy.misc import imread import os, sys -from ludwig import LudwigModel +from ludwig.api import LudwigModel import pandas as pd # @TODO: Define generci interface, similar to 'base_module' in the phases @@ -365,39 +365,39 @@ def train(self): if len(timeseries_cols) > 0: training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') - with disable_ludwig_output(): + #with disable_ludwig_output(): - model = LudwigModel(model_definition) + model = LudwigModel(model_definition) - # <---- Ludwig currently broken, since mode can't be initialized without train_set_metadata and train_set_metadata can't be obtained without running train... see this issue for any updates on the matter: https://github.com/uber/ludwig/issues/295 - #model.initialize_model(train_set_metadata={}) - #train_stats = model.train_online(data_df=training_dataframe) # ??Where to add model_name?? ----> model_name=self.transaction.lmd['name'] + # <---- Ludwig currently broken, since mode can't be initialized without train_set_metadata and train_set_metadata can't be obtained without running train... see this issue for any updates on the matter: https://github.com/uber/ludwig/issues/295 + #model.initialize_model(train_set_metadata={}) + #train_stats = model.train_online(data_df=training_dataframe) # ??Where to add model_name?? ----> model_name=self.transaction.lmd['name'] - if self.transaction.lmd['rebuild_model'] is True: - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) + if self.transaction.lmd['rebuild_model'] is True: + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) + else: + model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) + #,model_load_path=self.transaction.lmd['ludwig_data']['ludwig_save_path']) + + for k in train_stats['train']: + if k not in self.transaction.lmd['model_accuracy']['train']: + self.transaction.lmd['model_accuracy']['train'][k] = [] + self.transaction.lmd['model_accuracy']['test'][k] = [] + elif k is not 'combined': + # We should be adding the accuracy here but we only have it for combined, so, for now use that, will only affect multi-output scenarios anyway + pass else: - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) - #,model_load_path=self.transaction.lmd['ludwig_data']['ludwig_save_path']) - - for k in train_stats['train']: - if k not in self.transaction.lmd['model_accuracy']['train']: - self.transaction.lmd['model_accuracy']['train'][k] = [] - self.transaction.lmd['model_accuracy']['test'][k] = [] - elif k is not 'combined': - # We should be adding the accuracy here but we only have it for combined, so, for now use that, will only affect multi-output scenarios anyway - pass - else: - self.transaction.lmd['model_accuracy']['train'][k].extend(train_stats['train'][k]['accuracy']) - self.transaction.lmd['model_accuracy']['test'][k].extend(train_stats['test'][k]['accuracy']) - - ''' - @ TRAIN ONLINE BIT That's not 
working - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - for i in range(0,100): - train_stats = model.train_online(data_df=training_dataframe) - # The resulting train_stats are "None"... wonderful -_- - ''' + self.transaction.lmd['model_accuracy']['train'][k].extend(train_stats['train'][k]['accuracy']) + self.transaction.lmd['model_accuracy']['test'][k].extend(train_stats['test'][k]['accuracy']) + + ''' + @ TRAIN ONLINE BIT That's not working + model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) + for i in range(0,100): + train_stats = model.train_online(data_df=training_dataframe) + # The resulting train_stats are "None"... wonderful -_- + ''' ludwig_model_savepath = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, self.transaction.lmd['name'] + '_ludwig_data') diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -121,9 +121,10 @@ def _adapt_column(self, col_stats, col): 'y': [] } icm['data_distribution']['clusters'] = {} - for k, v in col_stats['histogram'].items(): - icm['data_distribution']['data_histogram']['x'].append(k) - icm['data_distribution']['data_histogram']['y'].append(v) + + for i in range(len(col_stats['histogram']['x'])): + icm['data_distribution']['data_histogram']['x'].append(col_stats['histogram']['x'][i]) + icm['data_distribution']['data_histogram']['y'].append(col_stats['histogram']['y'][i]) scores = ['consistency_score', 'redundancy_score', 'variability_score'] for score in scores: diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -902,6 +902,7 @@ def run(self, input_data, modify_light_metadata): if dictionary_lenght_percentage > 10 and len(col_data) > 50 and is_full_text==False: dictionary = [] dictionary_available = False + col_stats = { 'data_type': data_type, 'data_subtype': curr_data_subtype,
diff --git a/integration_testing/run_travis_tests.py b/integration_testing/run_travis_tests.py --- a/integration_testing/run_travis_tests.py +++ b/integration_testing/run_travis_tests.py @@ -111,8 +111,8 @@ def run_tests(): # Print statements are in for debugging, remove later, but keep the funcion calls to make sure the interface is working models = mdb.get_models() - lmd = mdb.get_model_data(models[0]['name']) - #print(lmd) + amd = mdb.get_model_data(models[0]['name']) + print(amd) except: print(traceback.format_exc())
Dependency and Import error **Describe the bug** I am getting this error during installation:

> ERROR: requests 2.21.0 has requirement urllib3<1.25,>=1.21.1, but you'll have urllib3 1.25.2 which is incompatible.

The installation completes but throws an error when importing mindsdb:

> Python 3.7.3 (default, Mar 26 2019, 21:43:19) [GCC 8.2.1 20181127] on linux
> Type "help", "copyright", "credits" or "license" for more information.
> >>> import mindsdb
> /home/user/.venvs/mindsdb/lib/python3.7/site-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.25.2) or chardet (3.0.4) doesn't match a supported version! RequestsDependencyWarning)
> Traceback (most recent call last):
>   File "<stdin>", line 1, in <module>
>   File "/home/user/.venvs/mindsdb/lib/python3.7/site-packages/mindsdb/__init__.py", line 9, in <module>
>     from mindsdb.libs.controllers.predictor import Predictor
>   File "/home/user/.venvs/mindsdb/lib/python3.7/site-packages/mindsdb/libs/controllers/predictor.py", line 14, in <module>
>     from mindsdb.libs.controllers.transaction import Transaction
>   File "/home/user/.venvs/mindsdb/lib/python3.7/site-packages/mindsdb/libs/controllers/transaction.py", line 7, in <module>
>     from mindsdb.libs.backends.ludwig import LudwigBackend
>   File "/home/user/.venvs/mindsdb/lib/python3.7/site-packages/mindsdb/libs/backends/ludwig.py", line 9, in <module>
>     from ludwig import LudwigModel
> ImportError: cannot import name 'LudwigModel' from 'ludwig' (/home/user/.venvs/mindsdb/lib/python3.7/site-packages/ludwig/__init__.py)

I checked the requirements for requests and urllib3 in requirements.txt and they are met on my machine. ![error](https://github.com/koyo-jakanees/codePrac/blob/master/media/images/mindsdb.png?raw=true)

**To Reproduce** Steps to reproduce the behavior: 1. Set up a venv 2. pip3 install mindsdb --user 3. import mindsdb 4. See error

**Expected behavior** `import mindsdb` should work.

**Desktop (please complete the following information):**
- OS: Manjaro Linux 18.0.4 (Illyria; DISTRIB_ID=ManjaroLinux, ID=manjaro, ID_LIKE=arch)
- Hardware: HP laptop, Core i3 CPU

**Additional context** Works fine in a conda env; the issue arises when using the default python env
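The ImportError itself is what the patch above fixes: ludwig 0.1.x stopped re-exporting `LudwigModel` from the package root, so it has to be imported from `ludwig.api` instead, as the diff does. A minimal illustration:

```
# Broken on ludwig 0.1.x: the class is no longer exported at package level.
# from ludwig import LudwigModel

# Working import, as applied in mindsdb/libs/backends/ludwig.py:
from ludwig.api import LudwigModel
```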
2019-04-30T17:46:58Z
[]
[]
mindsdb/mindsdb
188
mindsdb__mindsdb-188
[ "190" ]
044a11652c67137be579735f2a1b8f9bd9784f60
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -257,7 +257,10 @@ def _create_ludwig_dataframe(self, mode): elif ludwig_dtype == 'sequence': arr_str = self.transaction.input_data.data_array[row_ind][col_ind] - arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.lmd['column_stats'][col]['separator']))) + if arr_str is not None: + arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.lmd['column_stats'][col]['separator']))) + else: + arr = '' data[col].append(arr) # Date isn't supported yet, so we hack around it @@ -365,41 +368,41 @@ def train(self): if len(timeseries_cols) > 0: training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') - #with disable_ludwig_output(): + with disable_ludwig_output(True): - model = LudwigModel(model_definition) + model = LudwigModel(model_definition) - # <---- Ludwig currently broken, since mode can't be initialized without train_set_metadata and train_set_metadata can't be obtained without running train... see this issue for any updates on the matter: https://github.com/uber/ludwig/issues/295 - #model.initialize_model(train_set_metadata={}) - #train_stats = model.train_online(data_df=training_dataframe) # ??Where to add model_name?? ----> model_name=self.transaction.lmd['name'] + # <---- Ludwig currently broken, since mode can't be initialized without train_set_metadata and train_set_metadata can't be obtained without running train... see this issue for any updates on the matter: https://github.com/uber/ludwig/issues/295 + #model.initialize_model(train_set_metadata={}) + #train_stats = model.train_online(data_df=training_dataframe) # ??Where to add model_name?? 
----> model_name=self.transaction.lmd['name'] - if self.transaction.lmd['rebuild_model'] is True: - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) - else: - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) - #,model_load_path=self.transaction.lmd['ludwig_data']['ludwig_save_path']) - - for k in train_stats['train']: - if k not in self.transaction.lmd['model_accuracy']['train']: - self.transaction.lmd['model_accuracy']['train'][k] = [] - self.transaction.lmd['model_accuracy']['test'][k] = [] - elif k is not 'combined': - # We should be adding the accuracy here but we only have it for combined, so, for now use that, will only affect multi-output scenarios anyway - pass + if self.transaction.lmd['rebuild_model'] is True: + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) else: - self.transaction.lmd['model_accuracy']['train'][k].extend(train_stats['train'][k]['accuracy']) - self.transaction.lmd['model_accuracy']['test'][k].extend(train_stats['test'][k]['accuracy']) + model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) + #,model_load_path=self.transaction.lmd['ludwig_data']['ludwig_save_path']) + + for k in train_stats['train']: + if k not in self.transaction.lmd['model_accuracy']['train']: + self.transaction.lmd['model_accuracy']['train'][k] = [] + self.transaction.lmd['model_accuracy']['test'][k] = [] + elif k is not 'combined': + # We should be adding the accuracy here but we only have it for combined, so, for now use that, will only affect multi-output scenarios anyway + pass + else: + self.transaction.lmd['model_accuracy']['train'][k].extend(train_stats['train'][k]['accuracy']) + self.transaction.lmd['model_accuracy']['test'][k].extend(train_stats['test'][k]['accuracy']) - ''' - @ TRAIN ONLINE BIT That's not working - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - for i in range(0,100): - train_stats = model.train_online(data_df=training_dataframe) - # The resulting train_stats are "None"... wonderful -_- - ''' + ''' + @ TRAIN ONLINE BIT That's not working + model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) + for i in range(0,100): + train_stats = model.train_online(data_df=training_dataframe) + # The resulting train_stats are "None"... 
wonderful -_- + ''' - ludwig_model_savepath = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, self.transaction.lmd['name'] + '_ludwig_data') + ludwig_model_savepath = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, self.transaction.lmd['name'] + '_ludwig_data') model.save(ludwig_model_savepath) model.close() @@ -409,6 +412,8 @@ def train(self): def predict(self, mode='predict', ignore_columns=[]): predict_dataframe, model_definition = self._create_ludwig_dataframe(mode) + print(model_definition) + print(predict_dataframe) model_definition = self.transaction.hmd['ludwig_data']['model_definition'] model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -392,6 +392,7 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No heavy_transaction_metadata['name'] = self.name heavy_transaction_metadata['from_data'] = from_ds heavy_transaction_metadata['test_from_data'] = test_from_ds + heavy_transaction_metadata['bucketing_algorithms'] = {} light_transaction_metadata = {} light_transaction_metadata['version'] = str(__version__) diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -95,7 +95,7 @@ def _execute_learn(self): self.lmd['current_phase'] = MODEL_STATUS_ANALYZING self.lmd['columns'] = self.input_data.columns # this is populated by data extractor - self._call_phase_module('StatsGenerator', input_data=self.input_data, modify_light_metadata=True) + self._call_phase_module('StatsGenerator', input_data=self.input_data, modify_light_metadata=True, hmd=self.hmd) self.lmd['current_phase'] = MODEL_STATUS_TRAINING if self.lmd['model_backend'] == 'ludwig': diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -16,6 +16,8 @@ from mindsdb.config import CONFIG from mindsdb.libs.data_types.mindsdb_logger import log from mindsdb.libs.constants.mindsdb import * +import imagehash +from PIL import Image def get_key_for_val(key, dict_map): @@ -169,7 +171,7 @@ def get_value_bucket(value, buckets, col_stats): """ if buckets is None: return None - + if col_stats['data_subtype'] in (DATA_SUBTYPES.SINGLE, DATA_SUBTYPES.MULTIPLE): if value in buckets: bucket = buckets.index(value) @@ -178,6 +180,9 @@ def get_value_bucket(value, buckets, col_stats): elif col_stats['data_subtype'] in (DATA_SUBTYPES.BINARY, DATA_SUBTYPES.INT, DATA_SUBTYPES.FLOAT): bucket = closest(buckets, value) + elif col_stats['data_subtype'] in (DATA_SUBTYPES.IMAGE): + bucket = self.hmd['bucketing_algorithms'][col_name].predict(np.array(imagehash.phash(Image.open(value)).reshape(1, -1)))[0] + print(bucket) else: bucket = len(buckets) # for null values @@ -233,13 +238,15 @@ def __exit__(self, *_): @contextmanager # @TODO: Make it work with mindsdb logger/log levels... 
maybe -def disable_ludwig_output(): +def disable_ludwig_output(disable=True): try: try: old_tf_loglevel = os.environ['TF_CPP_MIN_LOG_LEVEL'] except: old_tf_loglevel = '2' - os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + + if not disable: + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Maybe get rid of this to not supress all errors and stdout with suppress_stdout_stderr(): yield diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -11,6 +11,10 @@ from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder from sklearn.metrics import matthews_corrcoef +from sklearn.metrics.pairwise import cosine_similarity +from sklearn.cluster import MiniBatchKMeans +import imagehash +from PIL import Image from mindsdb.config import CONFIG from mindsdb.libs.constants.mindsdb import * @@ -728,7 +732,7 @@ def _log_interesting_stats(self, stats): self.log.infoChart(stats[col_name]['data_subtype_dist'], type='list', uid='Data Type Distribution for column "{}"'.format(col_name)) - def run(self, input_data, modify_light_metadata): + def run(self, input_data, modify_light_metadata, hmd=None): """ # Runs the stats generation phase # This shouldn't alter the columns themselves, but rather provide the `stats` metadata object and update the types for each column @@ -777,7 +781,7 @@ def run(self, input_data, modify_light_metadata): col_data_dict = {} for i, col_name in enumerate(non_null_data): - col_data = non_null_data[col_name] # all rows in just one column + col_data = non_null_data[col_name] full_col_data = all_sampled_data[col_name] data_type, curr_data_subtype, data_type_dist, data_subtype_dist, additional_info, column_status = self._get_column_data_type(col_data, i, input_data.data_array, col_name) @@ -883,6 +887,48 @@ def run(self, input_data, modify_light_metadata): #"percentage_buckets": list(histogram.keys()) } + elif curr_data_subtype == DATA_SUBTYPES.IMAGE: + image_hashes = [] + for img_path in col_data: + img_hash = imagehash.phash(Image.open(img_path)) + seq_hash = [] + for hash_row in img_hash.hash: + seq_hash.extend(hash_row) + + image_hashes.append(np.array(seq_hash)) + + kmeans = MiniBatchKMeans(n_clusters=20, batch_size=round(len(image_hashes)/4)) + + kmeans.fit(image_hashes) + + if hmd is not None: + hmd['bucketing_algorithms'][col_name] = kmeans + + x = [] + y = [0] * len(kmeans.cluster_centers_) + + for cluster in kmeans.cluster_centers_: + similarities = cosine_similarity(image_hashes,kmeans.cluster_centers_) + + similarities = list(map(lambda x: sum(x), similarities)) + + index_of_most_similar = similarities.index(max(similarities)) + x.append(col_data[index_of_most_similar]) + + indices = kmeans.predict(image_hashes) + for index in indices: + y[index] +=1 + + col_stats = { + 'data_type': data_type, + 'data_subtype': curr_data_subtype, + 'percentage_buckets': kmeans.cluster_centers_, + 'histogram': { + 'x': x, + 'y': y + } + } + # @TODO This is probably wrong, look into it a bit later else: # see if its a sentence or a word
diff --git a/integration_testing/image_testing/predict.csv b/integration_testing/image_testing/predict.csv --- a/integration_testing/image_testing/predict.csv +++ b/integration_testing/image_testing/predict.csv @@ -4,3 +4,7 @@ Photo File Path,Photo Name /home/george/photos/DSC_0664-01.jpeg,Me14 /home/george/photos/DSC_0661-01.jpeg,Me15 /home/george/photos/DSC_0660-01.jpeg,Me16 +/home/george/photos/DSC_0626-01.jpeg,Me8,10 +/home/george/photos/DSC_0625-01.jpeg,Me9,9.5 +/home/george/photos/DSC_0622-01.jpeg,Me10,10 +/home/george/photos/DSC_0617-01.jpeg,Me11,9.6 diff --git a/integration_testing/image_testing/train.csv b/integration_testing/image_testing/train.csv --- a/integration_testing/image_testing/train.csv +++ b/integration_testing/image_testing/train.csv @@ -10,3 +10,25 @@ Photo File Path,Photo Name,Score /home/george/photos/DSC_0625-01.jpeg,Me9,9.5 /home/george/photos/DSC_0622-01.jpeg,Me10,10 /home/george/photos/DSC_0617-01.jpeg,Me11,9.6 +/home/george/photos/DSC_0642-01.jpeg,Me1,10 +/home/george/photos/DSC_0637-01.jpeg,Me2,9.9 +/home/george/photos/DSC_0636-01.jpeg,Me3,9.8 +/home/george/photos/DSC_0635-01.jpeg,Me4,9.95 +/home/george/photos/DSC_0634-01.jpeg,Me5,10 +/home/george/photos/DSC_0627-01.jpeg,Me6,9.993 +/home/george/photos/DSC_0626-02.jpeg,Me7,9.78 +/home/george/photos/DSC_0626-01.jpeg,Me8,10 +/home/george/photos/DSC_0625-01.jpeg,Me9,9.5 +/home/george/photos/DSC_0622-01.jpeg,Me10,10 +/home/george/photos/DSC_0617-01.jpeg,Me11,9.6 +/home/george/photos/DSC_0642-01.jpeg,Me1,10 +/home/george/photos/DSC_0637-01.jpeg,Me2,9.9 +/home/george/photos/DSC_0636-01.jpeg,Me3,9.8 +/home/george/photos/DSC_0635-01.jpeg,Me4,9.95 +/home/george/photos/DSC_0634-01.jpeg,Me5,10 +/home/george/photos/DSC_0627-01.jpeg,Me6,9.993 +/home/george/photos/DSC_0626-02.jpeg,Me7,9.78 +/home/george/photos/DSC_0626-01.jpeg,Me8,10 +/home/george/photos/DSC_0625-01.jpeg,Me9,9.5 +/home/george/photos/DSC_0622-01.jpeg,Me10,10 +/home/george/photos/DSC_0617-01.jpeg,Me11,9.6
AttributeError during prediction step **Describe the bug** After running the training step on the suicide_rates data (see #182), I ran the `predict.py` script, which was the following:

```
from mindsdb import Predictor

# use the model to make predictions
result = Predictor(name='suicide_rates').predict(when={'country':'Greece','year':1981,'sex':'male','age':'35-54','population':300000})

# you can now print the results
print(result)
```

Running the above resulted in the following error:

```
WARNING:mindsdb-logger-core-logger:libs/phases/base_module.py:57 - 'Target phase is different than PHASE_END, Only change this for debug purposes'
INFO:mindsdb-logger-core-logger:libs/phases/base_module.py:63 - '[START] DataExtractor'
INFO:mindsdb-logger-core-logger:libs/phases/base_module.py:76 - '[END] DataExtractor, execution time: 0.190 seconds'
Traceback (most recent call last):
  File "predict.py", line 12, in <module>
    result = Predictor(name='suicide_rates').predict(when={'country':'Greece','year':1981,'sex':'male','age':'35-54','population':300000})
  File "/home/milia/.venvs/mindsdb/lib/python3.6/site-packages/mindsdb/libs/controllers/predictor.py", line 472, in predict
    transaction = Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, breakpoint=breakpoint)
  File "/home/milia/.venvs/mindsdb/lib/python3.6/site-packages/mindsdb/libs/controllers/transaction.py", line 53, in __init__
    self.run()
  File "/home/milia/.venvs/mindsdb/lib/python3.6/site-packages/mindsdb/libs/controllers/transaction.py", line 259, in run
    self._execute_predict()
  File "/home/milia/.venvs/mindsdb/lib/python3.6/site-packages/mindsdb/libs/controllers/transaction.py", line 191, in _execute_predict
    predictions = self.model_backend.predict()
  File "/home/milia/.venvs/mindsdb/lib/python3.6/site-packages/mindsdb/libs/backends/ludwig.py", line 411, in predict
    predict_dataframe, model_definition = self._create_ludwig_dataframe(mode)
  File "/home/milia/.venvs/mindsdb/lib/python3.6/site-packages/mindsdb/libs/backends/ludwig.py", line 260, in _create_ludwig_dataframe
    arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.lmd['column_stats'][col]['separator'])))
AttributeError: 'NoneType' object has no attribute 'rstrip'
```

**To Reproduce** Steps to reproduce the behavior: 1. Run `train.py` (see #182). 2. Run `predict.py`. 3. See error

**Expected behavior** I expected to see the prediction results.

**Desktop (please complete the following information):** - OS: Ubuntu 18.04.2 LTS - python 3.6.7 - pip 19.1 - mindsdb 1.0.5 - virtualenv 15.1.0 - tensorflow 1.13.1 - ludwig 0.1.1
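The patch above guards against exactly this: a sequence cell in the when-data can be `None`, so the string parsing must be skipped for missing values. A self-contained sketch of the same guard (`parse_sequence_cell` is an illustrative name, not the real function):

```
def parse_sequence_cell(arr_str, separator=','):
    """Parse a '[1,2,3]'-style sequence cell, tolerating missing (None) cells."""
    if arr_str is None:
        # Predict-time when-data may not include a value for a sequence column.
        return ''
    return list(map(float, arr_str.rstrip(']').lstrip('[').split(separator)))


assert parse_sequence_cell('[1,2,3]') == [1.0, 2.0, 3.0]
assert parse_sequence_cell(None) == ''
```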
2019-05-02T19:27:02Z
[]
[]
mindsdb/mindsdb
206
mindsdb__mindsdb-206
[ "192" ]
1ac86b3a27d2f32d67a6b098aa9b9ea26df8993a
diff --git a/docs/examples/basic/train.py b/docs/examples/basic/train.py --- a/docs/examples/basic/train.py +++ b/docs/examples/basic/train.py @@ -6,7 +6,7 @@ - learn a model to predict the best retal price for a given property. -In order to to this we have a dataset "data_sources/home_rentals.csv" +In order to to this we have a dataset "data_sources/home_rentals.csv" (or download from https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv) """ @@ -18,4 +18,3 @@ to_predict='rental_price', # the column we want to learn to predict given all the data in the file from_data="home_rentals.csv" # the path to the file where we can learn from, (note: can be url) ) - diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py --- a/mindsdb/libs/phases/data_extractor/data_extractor.py +++ b/mindsdb/libs/phases/data_extractor/data_extractor.py @@ -265,7 +265,7 @@ def test(): mdb.learn( - from_data="https://raw.githubusercontent.com/mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv", + from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", # the path to the file where we can learn from, (note: can be url) to_predict='rental_price', # the column we want to learn to predict given all the data in the file sample_margin_of_error=0.02 diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -78,7 +78,7 @@ def test(): mdb = Predictor(name='home_rentals') mdb.learn( - from_data="https://raw.githubusercontent.com/mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv", + from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", # the path to the file where we can learn from, (note: can be url) to_predict='rental_price', # the column we want to learn to predict given all the data in the file #sample_margin_of_error=0.02, diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -1014,7 +1014,7 @@ def test(): mdb = Predictor(name='home_rentals') mdb.learn( - from_data="https://raw.githubusercontent.com/mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv", + from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", # the path to the file where we can learn from, (note: can be url) to_predict='rental_price', # the column we want to learn to predict given all the data in the file sample_margin_of_error=0.02
diff --git a/test.py b/test.py --- a/test.py +++ b/test.py @@ -4,6 +4,6 @@ # We tell mindsDB what we want to learn and from what data Predictor(name='home_rentals_price').learn( to_predict='rental_price', # the column we want to learn to predict given all the data in the file - from_data="https://raw.githubusercontent.com/mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv" + from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv" # the path to the file where we can learn from, (note: can be url) )
Maximum retry error Cannot access the CSV file for training. It says max retries exceeded with URL: /mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv, even though no more than two requests were made. This popped up the first time. Steps to reproduce the behavior: As mentioned in the docs,

```
import mindsdb

mdb = mindsdb.Predictor(name='real_estate_model')
mdb.learn(
    from_data="https://raw.githubusercontent.com/mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv",
    to_predict='rental_price')
```

Expected behavior: The model should have been trained from the given CSV. Screenshots: ![Screenshot from 2019-05-06 00-04-23](https://user-images.githubusercontent.com/34389951/57198628-8d6ab600-6f92-11e9-92dc-483f44a3ec07.png) Desktop: - OS: Ubuntu 16.04
Hi, this issue seems to be with the given url: https://raw.githubusercontent.com/mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv However, I can use it just fine, and the issue seems to be related to the certificate. Do you use an unusual list of certificates on your machine? What happens when you run `wget 'https://raw.githubusercontent.com/mindsdb/mindsdb/master/docs/examples/basic/home_rentals.csv'`? Or `wget 'https://github.com'`? The issue might have been temporary, just a coincidence (I remember some news about an important root cert becoming invalid for a short while a few days ago). Could you re-try the same code again now, just to make sure it wasn't that? One way or another, the issue is not mindsdb related, hence why I marked it as `invalid`, but I'd like to help you get the library running. If you just want to try mindsdb and don't care about using that particular URL, feel free to just use `mindsdb/docs/examples/basic/home_rentals.csv`, which is the exact same file (download it manually from the browser, or take it from your local mindsdb directory if you installed by cloning from git). I've checked this locally and it works great. @prayanshratan Can you try and run it again? Maybe it was a network error on your side. @George3d6 I think downloading raw content from GitHub is going to start giving issues now, because we will need to provide a key.

## Solution

Please move all example files to an S3 bucket, make it public, and update the examples.
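With the examples moved, the reproduction snippet from the report works unchanged apart from the URL; a quick check against the S3 copy, which is the same call the updated test above makes:

```
import mindsdb

mdb = mindsdb.Predictor(name='real_estate_model')
mdb.learn(
    from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",
    to_predict='rental_price',
)
```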
2019-05-13T13:32:56Z
[]
[]
mindsdb/mindsdb
209
mindsdb__mindsdb-209
[ "186" ]
636f46079d03359fd728f8fb197bf0bb441ac546
diff --git a/__init__.py b/__init__.py old mode 100644 new mode 100755 diff --git a/docs/conf.py b/docs/conf.py old mode 100644 new mode 100755 diff --git a/docs/examples/basic/predict.py b/docs/examples/basic/predict.py old mode 100644 new mode 100755 diff --git a/docs/examples/basic/train.py b/docs/examples/basic/train.py old mode 100644 new mode 100755 diff --git a/docs/examples/nlp/__init__.py b/docs/examples/nlp/__init__.py old mode 100644 new mode 100755 diff --git a/docs/examples/nlp/predict.py b/docs/examples/nlp/predict.py old mode 100644 new mode 100755 diff --git a/docs/examples/nlp/train.py b/docs/examples/nlp/train.py old mode 100644 new mode 100755 diff --git a/docs/examples/time_series/__init__.py b/docs/examples/time_series/__init__.py old mode 100644 new mode 100755 diff --git a/docs/examples/time_series/predict.py b/docs/examples/time_series/predict.py old mode 100644 new mode 100755 diff --git a/docs/examples/time_series/train.py b/docs/examples/time_series/train.py old mode 100644 new mode 100755 diff --git a/docs/video_docs/learn.py b/docs/video_docs/learn.py old mode 100644 new mode 100755 diff --git a/docs/video_docs/predict.py b/docs/video_docs/predict.py old mode 100644 new mode 100755 diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py old mode 100644 new mode 100755 diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/config/__init__.py b/mindsdb/config/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/config/helpers.py b/mindsdb/config/helpers.py old mode 100644 new mode 100755 diff --git a/mindsdb/external_libs/__init__.py b/mindsdb/external_libs/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/external_libs/mysql_scramble.py b/mindsdb/external_libs/mysql_scramble.py old mode 100644 new mode 100755 diff --git a/mindsdb/external_libs/stats.py b/mindsdb/external_libs/stats.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/__init__.py b/mindsdb/libs/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/backends/__init__.py b/mindsdb/libs/backends/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/constants/__init__.py b/mindsdb/libs/constants/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/constants/mindsdb.py b/mindsdb/libs/constants/mindsdb.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/controllers/__init__.py b/mindsdb/libs/controllers/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_sources/__init__.py b/mindsdb/libs/data_sources/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_sources/file_ds.py b/mindsdb/libs/data_sources/file_ds.py old mode 100644 new mode 100755 --- a/mindsdb/libs/data_sources/file_ds.py +++ b/mindsdb/libs/data_sources/file_ds.py @@ -6,6 +6,7 @@ import codecs import json import traceback +import codecs from mindsdb.libs.data_types.data_source import DataSource from pandas.io.json import json_normalize @@ -28,6 +29,9 @@ def clean(self, header): col = re.sub('_+','_',col) if col[-1] == '_': col = col[:-1] + while col[0] == '_': + col = col[1:] + col_count[col] = 1 if 
col not in col_count else col_count[col]+1 if col_count[col] > 1: col = col+'_'+str(col_count[col]) @@ -121,7 +125,12 @@ def _getDataIo(self, file): byte_str = data.read() # Move it to StringIO try: - data = StringIO(byte_str.decode('UTF-8')) + # Handle Microsoft's BOM "special" UTF-8 encoding + if byte_str.startswith(codecs.BOM_UTF8): + data = StringIO(byte_str.decode('utf-8-sig')) + else: + data = StringIO(byte_str.decode('utf-8')) + except: log.error(traceback.format_exc()) log.error('Could not load into string') diff --git a/mindsdb/libs/data_types/__init__.py b/mindsdb/libs/data_types/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_types/data_source.py b/mindsdb/libs/data_types/data_source.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_types/light_metadata.py b/mindsdb/libs/data_types/light_metadata.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_types/mindsdb_logger.py b/mindsdb/libs/data_types/mindsdb_logger.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_types/probability_evaluation.py b/mindsdb/libs/data_types/probability_evaluation.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_types/transaction_data.py b/mindsdb/libs/data_types/transaction_data.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_types/transaction_output_data.py b/mindsdb/libs/data_types/transaction_output_data.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/data_types/transaction_output_row.py b/mindsdb/libs/data_types/transaction_output_row.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/__init__.py b/mindsdb/libs/helpers/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/file_helpers.py b/mindsdb/libs/helpers/file_helpers.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/multi_data_source.py b/mindsdb/libs/helpers/multi_data_source.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/parser.py b/mindsdb/libs/helpers/parser.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/sqlite_helpers.py b/mindsdb/libs/helpers/sqlite_helpers.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/text_helpers.py b/mindsdb/libs/helpers/text_helpers.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/helpers/train_helpers.py b/mindsdb/libs/helpers/train_helpers.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/model_examination/__init__.py b/mindsdb/libs/model_examination/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/model_examination/column_evaluator.py b/mindsdb/libs/model_examination/column_evaluator.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/model_examination/probabilistic_validator.py b/mindsdb/libs/model_examination/probabilistic_validator.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/phases/__init__.py b/mindsdb/libs/phases/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/phases/base_module.py b/mindsdb/libs/phases/base_module.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/phases/data_extractor/__init__.py b/mindsdb/libs/phases/data_extractor/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py old mode 100644 new mode 100755 diff --git 
a/mindsdb/libs/phases/model_analyzer/__init__.py b/mindsdb/libs/phases/model_analyzer/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/phases/stats_generator/__init__.py b/mindsdb/libs/phases/stats_generator/__init__.py old mode 100644 new mode 100755 diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py old mode 100644 new mode 100755 diff --git a/mindsdb/scraps.py b/mindsdb/scraps.py old mode 100644 new mode 100755 diff --git a/setup.py b/setup.py old mode 100644 new mode 100755
diff --git a/docs/examples/basic/test.py b/docs/examples/basic/test.py old mode 100644 new mode 100755 diff --git a/integration_testing/data_generators.py b/integration_testing/data_generators.py old mode 100644 new mode 100755 diff --git a/integration_testing/files_that_fail/README.md b/integration_testing/files_that_fail/README.md old mode 100644 new mode 100755 diff --git a/integration_testing/files_that_fail/fail_2019_01_01_14_24.csv b/integration_testing/files_that_fail/fail_2019_01_01_14_24.csv old mode 100644 new mode 100755 diff --git a/integration_testing/files_that_fail/fail_2019_01_01_14_28.csv b/integration_testing/files_that_fail/fail_2019_01_01_14_28.csv old mode 100644 new mode 100755 diff --git a/integration_testing/image_testing/main.py b/integration_testing/image_testing/main.py old mode 100644 new mode 100755 diff --git a/integration_testing/image_testing/predict.csv b/integration_testing/image_testing/predict.csv old mode 100644 new mode 100755 diff --git a/integration_testing/image_testing/train.csv b/integration_testing/image_testing/train.csv old mode 100644 new mode 100755 diff --git a/integration_testing/run_a_file.py b/integration_testing/run_a_file.py old mode 100644 new mode 100755 --- a/integration_testing/run_a_file.py +++ b/integration_testing/run_a_file.py @@ -1,20 +1,11 @@ from mindsdb import Predictor -mdb = Predictor(name='marvel') +mdb = Predictor(name='suicide_model') +mdb.learn(from_data="integration_testing/suicide.csv", to_predict='suicides_no') -mdb.learn(from_data="marvel-wikia.xlsx", to_predict='FIRST_APPEARANCE') +# use the model to make predictions +result = Predictor(name='suicide_rates').predict(when={'country':'Greece','year':1981,'sex':'male','age':'35-54','population':300000}) -print('------------------------------------------------------------Done training------------------------------------------------------------') -""" -predicted = mdb.predict(when={ - 'Date':'11/03/2020', - 'Time':'18.00.00', - 'NMHC_GT': 1360.0, - 'AH': 0.655 -}) -print('------------------------------------------------------------Preidiction output------------------------------------------------------------') -for val in predicted: - print(val['CO_GT']) - print(val['CO_GT_confidence']) -""" +# you can now print the results +print(result) diff --git a/integration_testing/run_tests.py b/integration_testing/run_tests.py old mode 100644 new mode 100755 diff --git a/integration_testing/run_travis_tests.py b/integration_testing/run_travis_tests.py old mode 100644 new mode 100755 diff --git a/test.json b/test.json old mode 100644 new mode 100755 diff --git a/test.py b/test.py old mode 100644 new mode 100755
TensorFlow issue with column names Training fails for this file: https://www.kaggle.com/russellyates88/suicide-rates-overview-1985-to-2016#master.csv With this error: ``` raise ValueError("'%s' is not a valid scope name" % name) ValueError: 'country' is not a valid scope name ``` Spotted when testing issue #182, though the person who reported that issue seems to have had no problem with it.
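One plausible cause (an assumption here, not confirmed in the thread): TensorFlow validates scope names against a restricted character set, and an invisible UTF-8 byte-order mark surviving in the CSV header (the situation the `utf-8-sig` decoding change in the patch above guards against) makes an otherwise valid name like `country` fail the check. A minimal sketch, with the regex assumed from TF 1.x's name validation and all variable names hypothetical:

```python
import re

# TensorFlow 1.x validates scope names against (roughly) this pattern.
_VALID_SCOPE_NAME_RE = re.compile(r"^[A-Za-z0-9_.\-/]*$")

def is_valid_scope_name(name):
    return bool(_VALID_SCOPE_NAME_RE.match(name))

col = "\ufeffcountry"                 # CSV header polluted by a UTF-8 BOM
print(repr(col))                      # '\ufeffcountry': repr reveals the hidden BOM
print(is_valid_scope_name(col))       # False, the BOM is not an allowed character
print(is_valid_scope_name(col.lstrip("\ufeff")))  # True once stripped
```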
2019-05-15T09:23:49Z
[]
[]
mindsdb/mindsdb
211
mindsdb__mindsdb-211
[ "187" ]
6ed850fdfe195570e4a6dd0d04cf6149458b797a
diff --git a/mindsdb/libs/constants/mindsdb.py b/mindsdb/libs/constants/mindsdb.py --- a/mindsdb/libs/constants/mindsdb.py +++ b/mindsdb/libs/constants/mindsdb.py @@ -26,7 +26,7 @@ class DATA_SUBTYPES: # CATEGORICAL SINGLE = 'Binary Category' - MULTIPLE = 'Category' # Kind of unclear on the implementation + MULTIPLE = 'Category' # FILE_PATH IMAGE = 'Image' diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -308,9 +308,6 @@ def get_model_data(self, model_name): else: icm['importance_score'] = lmd['column_importances'][col] amd['data_analysis']['input_columns_metadata'].append(icm) - - - # ADAPTOR CODE return amd @@ -507,7 +504,6 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No for k in ['from_data', 'test_from_data']: if old_hmd[k] is not None: heavy_transaction_metadata[k] = old_hmd[k] - Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log, breakpoint=breakpoint) diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -112,11 +112,12 @@ def _get_text_type(self, data): if max_number_of_words < words: max_number_of_words += words - if max_number_of_words == 1: - return DATA_TYPES.CATEGORICAL, DATA_SUBTYPES.SINGLE - if max_number_of_words <= 3 and len(key_count) < total_length * 0.8: - # @TODO This used to be multiple... but, makes no sense for cateogry, should be discussed - return DATA_TYPES.CATEGORICAL, DATA_SUBTYPES.SINGLE + # If all sentences are less than or equal and 3 words, assume it's a category rather than a sentence + if max_number_of_words <= 3: + if len(key_count.keys()) < 3: + return DATA_TYPES.CATEGORICAL, DATA_SUBTYPES.SINGLE + else: + return DATA_TYPES.CATEGORICAL, DATA_SUBTYPES.MULTIPLE else: return DATA_TYPES.SEQUENTIAL, DATA_SUBTYPES.TEXT @@ -226,24 +227,28 @@ def _get_column_data_type(self, data, col_index, data_array, col_name): type_dist[curr_data_type] = type_dist.pop('Unknown') subtype_dist[curr_data_subtype] = subtype_dist.pop('Unknown') - all_values = [] - for row in data_array: - all_values.append(row[col_index]) - - all_distinct_vals = set(all_values) - - # Let's chose so random number - if (len(all_distinct_vals) < len(all_values)/200) or ( (len(all_distinct_vals) < 120) and (len(all_distinct_vals) < len(all_values)/6) ): - curr_data_type = DATA_TYPES.CATEGORICAL - if len(all_distinct_vals) < 3: - curr_data_subtype = DATA_SUBTYPES.SINGLE - else: - curr_data_subtype = DATA_SUBTYPES.MULTIPLE - type_dist = {} - subtype_dist = {} + # @TODO: Extremely slow for large datasets, make it faster + if curr_data_type != DATA_TYPES.CATEGORICAL: + all_values = [] + for row in data_array: + all_values.append(row[col_index]) + + all_distinct_vals = set(all_values) + + # The numbers here are picked randomly, the gist of it is that if values repeat themselves a lot we should consider the column to be categorical + nr_vals = len(all_values) + nr_distinct_vals = len(all_distinct_vals) + if nr_vals/15 > nr_distinct_vals: + curr_data_type = DATA_TYPES.CATEGORICAL + if len(all_distinct_vals) < 3: + curr_data_subtype = DATA_SUBTYPES.SINGLE + else: + curr_data_subtype = DATA_SUBTYPES.MULTIPLE + type_dist = {} + subtype_dist = {} - 
type_dist[curr_data_type] = len(data) - subtype_dist[curr_data_subtype] = len(data) + type_dist[curr_data_type] = len(data) + subtype_dist[curr_data_subtype] = len(data) return curr_data_type, curr_data_subtype, type_dist, subtype_dist, additional_info, 'Column ok'
diff --git a/integration_testing/run_travis_tests.py b/integration_testing/run_travis_tests.py --- a/integration_testing/run_travis_tests.py +++ b/integration_testing/run_travis_tests.py @@ -111,7 +111,7 @@ def run_tests(): # Print statements are in for debugging, remove later, but keep the funcion calls to make sure the interface is working models = mdb.get_models() - amd = mdb.get_model_data(models[0]['name']) + amd = mdb.get_model_data('test_one_label_prediction') print(amd) except:
Categorical values get interpreted as text Some of the categorical values from the Kaggle suicide dataset get interpreted as `text` rather than `categorical`. This might have to do with the stats generator sample size being too small. See #182 for how this can be replicated.
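The fix in the patch above replaces a sampled guess with a repeated-values check over the whole column. A minimal standalone sketch of that heuristic (the `repeat_factor` threshold of 15 mirrors the patched constant; everything else is illustrative):

```python
def guess_is_categorical(values, repeat_factor=15):
    # If values repeat often enough across the whole column, treat it as
    # categorical rather than free text or a high-cardinality column.
    return len(values) / repeat_factor > len(set(values))

ages = ["35-54", "15-24", "55-74"] * 500              # few distinct values
print(guess_is_categorical(ages))                     # True

rows = ["free text row %d" % i for i in range(1500)]  # all distinct
print(guess_is_categorical(rows))                     # False
```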
2019-05-15T17:04:07Z
[]
[]
mindsdb/mindsdb
218
mindsdb__mindsdb-218
[ "208" ]
6ed850fdfe195570e4a6dd0d04cf6149458b797a
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -3,11 +3,15 @@ from mindsdb.libs.helpers.general_helpers import disable_ludwig_output from dateutil.parser import parse as parse_datetime -from scipy.misc import imread import os, sys +import shutil +from tensorflow.python.client import device_lib from ludwig.api import LudwigModel +from ludwig.data.preprocessing import build_metadata import pandas as pd +from scipy.misc import imread + # @TODO: Define generci interface, similar to 'base_module' in the phases class LudwigBackend(): @@ -341,6 +345,7 @@ def _create_ludwig_dataframe(self, mode): ,'width': width ,'resize_image': True ,'resize_method': 'crop_or_pad' + ,'num_channels': 3 } } @@ -358,6 +363,25 @@ def _create_ludwig_dataframe(self, mode): return df, model_definition + def get_model_dir(self): + model_dir = None + for thing in os.listdir(self.transaction.lmd['ludwig_data']['ludwig_save_path']): + if 'api_experiment' in thing: + model_dir = os.path.join(self.transaction.lmd['ludwig_data']['ludwig_save_path'],thing,'model') + if model_dir is None: + model_dir = os.path.join(self.transaction.lmd['ludwig_data']['ludwig_save_path'],'model') + return model_dir + + def get_useable_gpus(self): + local_device_protos = device_lib.list_local_devices() + gpus = [x for x in local_device_protos if x.device_type == 'GPU'] + #bus_ids = [x.locality.bus_id for x in gpus] + gpu_indices = [i for i in range(len(gpus))] + if len(gpu_indices) == 0: + return None + else: + return gpu_indices + def train(self): training_dataframe, model_definition = self._create_ludwig_dataframe('train') if self.transaction.lmd['model_order_by'] is None: @@ -369,19 +393,30 @@ def train(self): training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') with disable_ludwig_output(True): - - model = LudwigModel(model_definition) - # <---- Ludwig currently broken, since mode can't be initialized without train_set_metadata and train_set_metadata can't be obtained without running train... see this issue for any updates on the matter: https://github.com/uber/ludwig/issues/295 #model.initialize_model(train_set_metadata={}) #train_stats = model.train_online(data_df=training_dataframe) # ??Where to add model_name?? 
----> model_name=self.transaction.lmd['name'] + ludwig_save_is_working = False + + if not ludwig_save_is_working: + shutil.rmtree('results',ignore_errors=True) + if self.transaction.lmd['rebuild_model'] is True: - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) + model = LudwigModel(model_definition) + merged_model_definition = model.model_definition + train_set_metadata = build_metadata( + training_dataframe, + (merged_model_definition['input_features'] + + merged_model_definition['output_features']), + merged_model_definition['preprocessing'] + ) + model.initialize_model(train_set_metadata=train_set_metadata, gpus=self.get_useable_gpus()) + + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=ludwig_save_is_working, skip_save_progress=True, gpus=self.get_useable_gpus()) else: - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=True) - #,model_load_path=self.transaction.lmd['ludwig_data']['ludwig_save_path']) + model = LudwigModel.load(model_dir=self.get_model_dir()) + train_stats = model.train(data_df=training_dataframe, model_name=self.transaction.lmd['name'], skip_save_model=ludwig_save_is_working, skip_save_progress=True, gpus=self.get_useable_gpus()) for k in train_stats['train']: if k not in self.transaction.lmd['model_accuracy']['train']: @@ -394,19 +429,21 @@ def train(self): self.transaction.lmd['model_accuracy']['train'][k].extend(train_stats['train'][k]['accuracy']) self.transaction.lmd['model_accuracy']['test'][k].extend(train_stats['test'][k]['accuracy']) - ''' - @ TRAIN ONLINE BIT That's not working - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - for i in range(0,100): - train_stats = model.train_online(data_df=training_dataframe) - # The resulting train_stats are "None"... wonderful -_- - ''' - - ludwig_model_savepath = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, self.transaction.lmd['name'] + '_ludwig_data') - - model.save(ludwig_model_savepath) - model.close() - + ''' + @ TRAIN ONLINE BIT That's not working + model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) + for i in range(0,100): + train_stats = model.train_online(data_df=training_dataframe) + # The resulting train_stats are "None"... 
wonderful -_- + ''' + + ludwig_model_savepath = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, self.transaction.lmd['name'] + '_ludwig_data') + if ludwig_save_is_working: + model.save(ludwig_model_savepath) + model.close() + else: + shutil.rmtree(ludwig_model_savepath,ignore_errors=True) + shutil.move(os.path.join('results',os.listdir('results')[0]),ludwig_model_savepath) self.transaction.lmd['ludwig_data'] = {'ludwig_save_path': ludwig_model_savepath} self.transaction.hmd['ludwig_data'] = {'model_definition': model_definition} @@ -414,8 +451,6 @@ def predict(self, mode='predict', ignore_columns=[]): predict_dataframe, model_definition = self._create_ludwig_dataframe(mode) model_definition = self.transaction.hmd['ludwig_data']['model_definition'] - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - if self.transaction.lmd['model_order_by'] is None: timeseries_cols = [] else: @@ -431,9 +466,10 @@ def predict(self, mode='predict', ignore_columns=[]): for date_appendage in ['_year', '_month','_day']: predict_dataframe[ignore_col + date_appendage] = [None] * len(predict_dataframe[ignore_col + date_appendage]) - with disable_ludwig_output(): - model = LudwigModel.load(self.transaction.lmd['ludwig_data']['ludwig_save_path']) - predictions = model.predict(data_df=predict_dataframe) + with disable_ludwig_output(True): + model_dir = self.get_model_dir() + model = LudwigModel.load(model_dir=model_dir) + predictions = model.predict(data_df=predict_dataframe, gpus=self.get_useable_gpus()) for col_name in predictions: col_name_normalized = col_name.replace('_predictions', '') diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -344,6 +344,17 @@ def export_model(self, model_name): for file_name in [model_name + '_heavy_model_metadata.pickle', model_name + '_light_model_metadata.pickle']: full_path = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, file_name) zip_fp.write(full_path, os.path.basename(full_path)) + + # If the backend is ludwig, save the ludwig files + try: + ludwig_model_path = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, model_name + '_ludwig_data') + for root, dirs, files in os.walk(ludwig_model_path): + for file in files: + full_path = os.path.join(root, file) + zip_fp.write(full_path, full_path[len(CONFIG.MINDSDB_STORAGE_PATH):]) + except: + pass + print(f'Exported model to {storage_file}') return True except Exception as e: diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -256,11 +256,11 @@ def disable_ludwig_output(disable=True): old_tf_loglevel = os.environ['TF_CPP_MIN_LOG_LEVEL'] except: old_tf_loglevel = '2' - - if not disable: - os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Maybe get rid of this to not supress all errors and stdout - with suppress_stdout_stderr(): + if disable: + with suppress_stdout_stderr(): + yield + else: yield finally: os.environ['TF_CPP_MIN_LOG_LEVEL'] = old_tf_loglevel diff --git a/mindsdb/libs/model_examination/column_evaluator.py b/mindsdb/libs/model_examination/column_evaluator.py --- a/mindsdb/libs/model_examination/column_evaluator.py +++ b/mindsdb/libs/model_examination/column_evaluator.py @@ -1,6 +1,7 @@ from mindsdb.libs.helpers.general_helpers import evaluate_accuracy, get_value_bucket from mindsdb.libs.phases.stats_generator.stats_generator import StatsGenerator 
from mindsdb.libs.data_types.transaction_data import TransactionData +from mindsdb.libs.constants.mindsdb import * class ColumnEvaluator(): """ @@ -35,9 +36,14 @@ def get_column_importance(self, model, output_columns, input_columns, full_datas elif 'histogram' in validation_set_output_stats[output_column]: all_columns_prediction_distribution[output_column] = validation_set_output_stats[output_column]['histogram'] + ignorable_input_columns = [] for input_column in input_columns: + if stats[input_column]['data_type'] != DATA_TYPES.FILE_PATH: + ignorable_input_columns.append(input_column) + + for input_column in ignorable_input_columns: # See what happens with the accuracy of the outputs if only this column is present - ignore_columns = [col for col in input_columns if col != input_column ] + ignore_columns = [col for col in ignorable_input_columns if col != input_column] col_only_predictions = model.predict('validate', ignore_columns) col_only_accuracy = evaluate_accuracy(col_only_predictions, full_dataset, stats, output_columns) diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -46,8 +46,13 @@ def run(self): probabilistic_validators[col] = ProbabilisticValidator( col_stats=self.transaction.lmd['column_stats'][col]) + ignorable_input_columns = [] + for input_column in input_columns: + if self.transaction.lmd['column_stats'][input_column]['data_type'] != DATA_TYPES.FILE_PATH: + ignorable_input_columns.append(input_column) + # Run on the validation set multiple times, each time with one of the column blanked out - for column_name in input_columns: + for column_name in ignorable_input_columns: ignore_columns = [] ignore_columns.append(column_name)
diff --git a/integration_testing/image_testing/predict.csv b/integration_testing/image_testing/predict.csv --- a/integration_testing/image_testing/predict.csv +++ b/integration_testing/image_testing/predict.csv @@ -4,7 +4,7 @@ Photo File Path,Photo Name /home/george/photos/DSC_0664-01.jpeg,Me14 /home/george/photos/DSC_0661-01.jpeg,Me15 /home/george/photos/DSC_0660-01.jpeg,Me16 -/home/george/photos/DSC_0626-01.jpeg,Me8,10 -/home/george/photos/DSC_0625-01.jpeg,Me9,9.5 -/home/george/photos/DSC_0622-01.jpeg,Me10,10 -/home/george/photos/DSC_0617-01.jpeg,Me11,9.6 +/home/george/photos/DSC_0626-01.jpeg,Me8 +/home/george/photos/DSC_0625-01.jpeg,Me9 +/home/george/photos/DSC_0622-01.jpeg,Me10 +/home/george/photos/DSC_0617-01.jpeg,Me11
Ludwig GPU support * Make sure that Ludwig is properly using GPU-based TensorFlow if the user's machine has at least one GPU (see the sketch below). * Try to support as many types of machines as we can (as in, different versions of cuDNN and the various other shared libraries tf-gpu depends upon).
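A minimal sketch of the GPU-detection approach the patch above takes: ask the local TensorFlow build which devices it can see and hand Ludwig the GPU indices, where an empty list means CPU-only (e.g. a non-GPU TensorFlow wheel or a missing cuDNN). This assumes a TF 1.x environment, as used by the code above:

```python
from tensorflow.python.client import device_lib

def usable_gpu_indices():
    # Returns [0, 1, ...] for each GPU TensorFlow can actually use,
    # or [] when only CPU devices are visible.
    devices = device_lib.list_local_devices()
    gpus = [d for d in devices if d.device_type == 'GPU']
    return list(range(len(gpus)))

print(usable_gpu_indices())   # e.g. [0] on a single-GPU machine, [] otherwise
```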
2019-05-17T12:14:45Z
[]
[]
mindsdb/mindsdb
223
mindsdb__mindsdb-223
[ "222" ]
b8068cb3b387afc2f05d6ee18a56f34e022f51be
diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -47,7 +47,7 @@ def _translate_df_to_timeseries_format(self, df, model_definition, timeseries_co for feature_def in model_definition['input_features']: if feature_def['name'] not in self.transaction.lmd['model_group_by'] and feature_def['name'] not in previous_predict_col_names: feature_def['type'] = 'sequence' - if feature_def['name'] not in timeseries_cols: + if feature_def['name'] not in [timeseries_col_name]: other_col_names.append(feature_def['name']) @@ -205,12 +205,12 @@ def _create_ludwig_dataframe(self, mode): if col in timeseries_cols: encoder = 'rnn' - cell_type = 'gru_cudnn' + cell_type = 'rnn' ludwig_dtype = 'order_by_col' if data_subtype in DATA_SUBTYPES.ARRAY: encoder = 'rnn' - cell_type = 'gru_cudnn' + cell_type = 'rnn' ludwig_dtype = 'sequence' elif data_subtype in (DATA_SUBTYPES.INT, DATA_SUBTYPES.FLOAT): @@ -296,6 +296,12 @@ def _create_ludwig_dataframe(self, mode): custom_logic_continue = True + if col in timeseries_cols: + timeseries_cols.remove(col) + timeseries_cols.append(col + '_day') + timeseries_cols.append(col + '_month') + timeseries_cols.append(col + '_year') + elif data_subtype in (DATA_SUBTYPES.TIMESTAMP): if self.transaction.input_data.data_array[row_ind][col_ind] is None: unix_ts = 0 @@ -361,7 +367,7 @@ def _create_ludwig_dataframe(self, mode): if len(timeseries_cols) > 0: df.sort_values(timeseries_cols) - return df, model_definition + return df, model_definition, timeseries_cols def get_model_dir(self): model_dir = None @@ -373,6 +379,8 @@ def get_model_dir(self): return model_dir def get_useable_gpus(self): + if self.transaction.lmd['use_gpu'] == False: + return [] local_device_protos = device_lib.list_local_devices() gpus = [x for x in local_device_protos if x.device_type == 'GPU'] #bus_ids = [x.locality.bus_id for x in gpus] @@ -383,11 +391,7 @@ def get_useable_gpus(self): return gpu_indices def train(self): - training_dataframe, model_definition = self._create_ludwig_dataframe('train') - if self.transaction.lmd['model_order_by'] is None: - timeseries_cols = [] - else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd['model_order_by'])) + training_dataframe, model_definition, timeseries_cols = self._create_ludwig_dataframe('train') if len(timeseries_cols) > 0: training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') @@ -448,14 +452,9 @@ def train(self): self.transaction.hmd['ludwig_data'] = {'model_definition': model_definition} def predict(self, mode='predict', ignore_columns=[]): - predict_dataframe, model_definition = self._create_ludwig_dataframe(mode) + predict_dataframe, model_definition, timeseries_cols = self._create_ludwig_dataframe(mode) model_definition = self.transaction.hmd['ludwig_data']['model_definition'] - if self.transaction.lmd['model_order_by'] is None: - timeseries_cols = [] - else: - timeseries_cols = list(map(lambda x: x[0], self.transaction.lmd['model_order_by'])) - if len(timeseries_cols) > 0: predict_dataframe, model_definition = self._translate_df_to_timeseries_format(predict_dataframe, model_definition, timeseries_cols) diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -400,7 +400,7 @@ def delete_model(self, model_name): def learn(self, 
to_predict, from_data = None, test_from_data=None, group_by = None, window_size_samples = None, window_size_seconds = None, window_size = None, order_by = [], sample_margin_of_error = CONFIG.DEFAULT_MARGIN_OF_ERROR, ignore_columns = [], rename_strange_columns = False, - stop_training_in_x_seconds = None, stop_training_in_accuracy = None, send_logs=CONFIG.SEND_LOGS, backend='ludwig', rebuild_model=True): + stop_training_in_x_seconds = None, stop_training_in_accuracy = None, send_logs=CONFIG.SEND_LOGS, backend='ludwig', rebuild_model=True, use_gpu=True): """ Tells the mind to learn to predict a column or columns from the data in 'from_data' @@ -498,6 +498,7 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No light_transaction_metadata['unusual_columns_buckets_importances'] = None light_transaction_metadata['columnless_prediction_distribution'] = None light_transaction_metadata['all_columns_prediction_distribution'] = None + light_transaction_metadata['use_gpu'] = use_gpu light_transaction_metadata['malformed_columns'] = {'names': [], 'indices': []} @@ -522,7 +523,7 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log, breakpoint=breakpoint) - def predict(self, when={}, when_data = None, update_cached_model = False): + def predict(self, when={}, when_data = None, update_cached_model = False, use_gpu=True): """ You have a mind trained already and you want to make a prediction @@ -552,6 +553,7 @@ def predict(self, when={}, when_data = None, update_cached_model = False): light_transaction_metadata = {} light_transaction_metadata['name'] = self.name light_transaction_metadata['type'] = transaction_type + light_transaction_metadata['use_gpu'] = use_gpu light_transaction_metadata['data_preparation'] = {} transaction = Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, breakpoint=breakpoint)
diff --git a/integration_testing/run_tests.py b/integration_testing/run_tests.py --- a/integration_testing/run_tests.py +++ b/integration_testing/run_tests.py @@ -45,7 +45,7 @@ def test_timeseries(): logger.info('Starting timeseries test !') ts_hours = 12 separator = ',' - data_len = 1200 + data_len = 600 train_file_name = 'train_data.csv' test_file_name = 'test_data.csv' @@ -54,9 +54,9 @@ def test_timeseries(): try: # add ,'ascii' in the features list to re-implement the group by - features = generate_value_cols(['datetime','int','float', 'ascii'],data_len, separator, ts_hours * 3600) - features[3] = list(map(lambda x: str(x[0]) if len(x) > 0 else 'Nrmm', features[3])) - labels = [generate_labels_1(features, separator)] + features = generate_value_cols(['date','int'],data_len, separator, ts_hours * 3600) + #features[3] = list(map(lambda x: str(x[0]) if len(x) > 0 else 'Nrmm', features[3])) + labels = [generate_labels_2(features, separator)] feature_headers = list(map(lambda col: col[0], features)) label_headers = list(map(lambda col: col[0], labels)) @@ -82,6 +82,7 @@ def test_timeseries(): mdb = mindsdb.Predictor(name='test_date_timeseries') logger.debug(f'Succesfully create mindsdb Predictor') except: + print(traceback.format_exc()) logger.error(f'Failed to create mindsdb Predictor') exit(1) @@ -92,9 +93,10 @@ def test_timeseries(): to_predict=label_headers # timeseries specific argsw ,order_by=feature_headers[0] - ,window_size_seconds=ts_hours* 3600 * 1.5 - #,window_size=6 - ,group_by = feature_headers[3] + #,window_size_seconds=ts_hours* 3600 * 1.5 + ,window_size_samples=6 + #,group_by = feature_headers[3] + ,use_gpu=False ) logger.info(f'--------------- Learning ran succesfully ---------------') except: @@ -112,7 +114,7 @@ def test_timeseries(): exit(1) try: - results = mdb.predict(when_data=test_file_name) + results = mdb.predict(when_data=test_file_name,use_gpu=False) models = mdb.get_models() mdb.get_model_data(models[0]['name']) for row in results: @@ -378,7 +380,7 @@ def test_multilabel_prediction(): setup_testing_logger() -test_one_label_prediction_wo_strings() +#test_one_label_prediction_wo_strings() test_timeseries() -test_multilabel_prediction() -test_one_label_prediction() +#test_multilabel_prediction() +#test_one_label_prediction()
Timeseries order-by not working for date @surendra1472 recently found this bug. When the order-by column is a date, timeseries crashes because that column (the order-by column in this case) is split into 3 columns for year, month and day, but the sorting is still done on the original column.
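A minimal pandas sketch of the failure and the fix direction taken in the patch above: once a date column is expanded into `_year`/`_month`/`_day` features, the sort keys must be swapped to the derived columns, since the original column is no longer the one the model sees. Column names here are illustrative:

```python
import pandas as pd

df = pd.DataFrame({'when': ['2019-05-19', '2018-01-02', '2019-01-01']})
dates = pd.to_datetime(df['when'])
df['when_year'], df['when_month'], df['when_day'] = (
    dates.dt.year, dates.dt.month, dates.dt.day)

order_by = ['when']
# Buggy behaviour: sorting on the original 'when' column after it has been
# replaced by derived features. Fix: swap in the derived columns instead.
order_by = [c for c in order_by if c != 'when'] \
           + ['when_year', 'when_month', 'when_day']
print(df.sort_values(order_by))
```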
2019-05-19T18:36:31Z
[]
[]
mindsdb/mindsdb
304
mindsdb__mindsdb-304
[ "283", "282", "278" ]
fcb3df015eadf188d5c9d4942d1b7b4a9b107e49
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py --- a/mindsdb/__about__.py +++ b/mindsdb/__about__.py @@ -1,6 +1,6 @@ __title__ = 'MindsDB' __package_name__ = 'mindsdb' -__version__ = '1.5.0' +__version__ = '1.5.1' __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects." __email__ = "[email protected]" __author__ = 'MindsDB Inc' diff --git a/mindsdb/libs/backends/lightwood.py b/mindsdb/libs/backends/lightwood.py --- a/mindsdb/libs/backends/lightwood.py +++ b/mindsdb/libs/backends/lightwood.py @@ -148,7 +148,7 @@ def callback_on_iter(self, epoch, mix_error, test_error, delta_mean, accuracy): self.transaction.log.debug(f'We\'ve reached training epoch nr {epoch} with an r2 score of {value_rounded} on the testing dataset') else: value_pct = round(value * 100,2) - self.transaction.log.debug(f'We\'ve reached training epoch nr {epoch} with an error of {value_pct}% on the testing dataset') + self.transaction.log.debug(f'We\'ve reached training epoch nr {epoch} with an accuracy of {value_pct}% on the testing dataset') def train(self): lightwood.config.config.CONFIG.USE_CUDA = self.transaction.lmd['use_gpu'] diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -206,7 +206,7 @@ def _create_ludwig_dataframe(self, mode): for row_ind in indexes: if ludwig_dtype == 'order_by_col': - ts_data_point = self.transaction.input_data.data_frame[col][row_ind] + ts_data_point = self.transaction.input_data.data_frame[col].iloc[row_ind] try: ts_data_point = float(ts_data_point) @@ -215,7 +215,7 @@ def _create_ludwig_dataframe(self, mode): data[tf_col].append(ts_data_point) elif ludwig_dtype == 'sequence': - arr_str = self.transaction.input_data.data_frame[col][row_ind] + arr_str = self.transaction.input_data.data_frame[col].iloc[row_ind] if arr_str is not None: arr = list(map(float,arr_str.rstrip(']').lstrip('[').split(self.transaction.lmd['column_stats'][col]['separator']))) else: @@ -243,7 +243,7 @@ def _create_ludwig_dataframe(self, mode): ,'type': 'numerical' }) - date = parse_datetime(self.transaction.input_data.data_frame[col][row_ind]) + date = parse_datetime(self.transaction.input_data.data_frame[col].iloc[row_ind]) data[tf_col + '_year'].append(date.year) data[tf_col + '_month'].append(date.month) @@ -258,32 +258,32 @@ def _create_ludwig_dataframe(self, mode): timeseries_cols.append(col + '_year') elif data_subtype in (DATA_SUBTYPES.TIMESTAMP): - if self.transaction.input_data.data_frame[col][row_ind] is None: + if self.transaction.input_data.data_frame[col].iloc[row_ind] is None: unix_ts = 0 else: - unix_ts = parse_datetime(self.transaction.input_data.data_frame[col][row_ind]).timestamp() + unix_ts = parse_datetime(self.transaction.input_data.data_frame[col].iloc[row_ind]).timestamp() data[tf_col].append(unix_ts) elif data_subtype in (DATA_SUBTYPES.FLOAT): - if type(self.transaction.input_data.data_frame[col][row_ind]) == str: - data[tf_col].append(float(str(self.transaction.input_data.data_frame[col][row_ind]).replace(',','.'))) + if type(self.transaction.input_data.data_frame[col].iloc[row_ind]) == str: + data[tf_col].append(float(str(self.transaction.input_data.data_frame[col].iloc[row_ind]).replace(',','.'))) else: - data[tf_col].append(self.transaction.input_data.data_frame[col][row_ind]) + data[tf_col].append(self.transaction.input_data.data_frame[col].iloc[row_ind]) elif data_subtype in 
(DATA_SUBTYPES.INT): - if type(self.transaction.input_data.data_frame[col][row_ind]) == str: - data[tf_col].append(round(float(str(self.transaction.input_data.data_frame[col][row_ind]).replace(',','.')))) + if type(self.transaction.input_data.data_frame[col].iloc[row_ind]) == str: + data[tf_col].append(round(float(str(self.transaction.input_data.data_frame[col].iloc[row_ind]).replace(',','.')))) else: - data[tf_col].append(self.transaction.input_data.data_frame[col][row_ind]) + data[tf_col].append(self.transaction.input_data.data_frame[col].iloc[row_ind]) elif data_subtype in (DATA_SUBTYPES.IMAGE): - if os.path.isabs(self.transaction.input_data.data_frame[col][row_ind]): - data[tf_col].append(self.transaction.input_data.data_frame[col][row_ind]) + if os.path.isabs(self.transaction.input_data.data_frame[col].iloc[row_ind]): + data[tf_col].append(self.transaction.input_data.data_frame[col].iloc[row_ind]) else: - data[tf_col].append(os.path.join(os.getcwd(), self.transaction.input_data.data_frame[col][row_ind])) + data[tf_col].append(os.path.join(os.getcwd(), self.transaction.input_data.data_frame[col].iloc[row_ind])) else: - data[tf_col].append(self.transaction.input_data.data_frame[col][row_ind]) + data[tf_col].append(self.transaction.input_data.data_frame[col].iloc[row_ind]) if custom_logic_continue: continue @@ -318,7 +318,7 @@ def _create_ludwig_dataframe(self, mode): model_definition['input_features'].append(input_def) else: output_def = { - 'name': col + 'name': tf_col ,'type': ludwig_dtype } model_definition['output_features'].append(output_def) @@ -351,7 +351,7 @@ def _get_useable_gpus(self): return gpu_indices def train(self): - training_dataframe, model_definition, timeseries_cols, has_heavy_data, col_map = self._create_ludwig_dataframe('train') + training_dataframe, model_definition, timeseries_cols, has_heavy_data, self.transaction.lmd['ludwig_tf_self_col_map'] = self._create_ludwig_dataframe('train') if len(timeseries_cols) > 0: training_dataframe, model_definition = self._translate_df_to_timeseries_format(training_dataframe, model_definition, timeseries_cols, 'train') @@ -426,15 +426,15 @@ def train(self): self.transaction.hmd['ludwig_data'] = {'model_definition': model_definition} def predict(self, mode='predict', ignore_columns=[]): - predict_dataframe, model_definition, timeseries_cols, has_heavy_data, col_map = self._create_ludwig_dataframe(mode) + predict_dataframe, model_definition, timeseries_cols, has_heavy_data, _ = self._create_ludwig_dataframe(mode) model_definition = self.transaction.hmd['ludwig_data']['model_definition'] if len(timeseries_cols) > 0: predict_dataframe, model_definition = self._translate_df_to_timeseries_format(predict_dataframe, model_definition, timeseries_cols) for ignore_col in ignore_columns: - for tf_col in col_map: - if ignore_col == col_map[tf_col]: + for tf_col in self.transaction.lmd['ludwig_tf_self_col_map']: + if ignore_col == self.transaction.lmd['ludwig_tf_self_col_map'][tf_col]: ignore_col = tf_col try: predict_dataframe[ignore_col] = [None] * len(predict_dataframe[ignore_col]) @@ -449,8 +449,8 @@ def predict(self, mode='predict', ignore_columns=[]): for col_name in predictions: col_name_normalized = col_name.replace('_predictions', '') - if col_name_normalized in col_map: - col_name_normalized = col_map[col_name_normalized] + if col_name_normalized in self.transaction.lmd['ludwig_tf_self_col_map']: + col_name_normalized = self.transaction.lmd['ludwig_tf_self_col_map'][col_name_normalized] predictions = predictions.rename(columns = 
{col_name: col_name_normalized}) return predictions diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -499,12 +499,12 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No heavy_transaction_metadata['test_from_data'] = test_from_ds heavy_transaction_metadata['bucketing_algorithms'] = {} heavy_transaction_metadata['predictions'] = None + heavy_transaction_metadata['model_backend'] = backend light_transaction_metadata = {} light_transaction_metadata['version'] = str(__version__) light_transaction_metadata['name'] = self.name light_transaction_metadata['data_preparation'] = {} - light_transaction_metadata['model_backend'] = backend light_transaction_metadata['predict_columns'] = predict_columns light_transaction_metadata['model_columns_map'] = from_ds._col_map light_transaction_metadata['model_group_by'] = group_by @@ -576,7 +576,7 @@ def learn(self, to_predict, from_data = None, test_from_data=None, group_by = No Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log) - def predict(self, when={}, when_data = None, update_cached_model = False, use_gpu=False, unstable_parameters_dict={}): + def predict(self, when={}, when_data = None, update_cached_model = False, use_gpu=False, unstable_parameters_dict={}, backend=None): """ You have a mind trained already and you want to make a prediction @@ -590,7 +590,6 @@ def predict(self, when={}, when_data = None, update_cached_model = False, use_gp transaction_type = TRANSACTION_PREDICT when_ds = None if when_data is None else getDS(when_data) - # lets turn into lists: when when = [when] if type(when) in [type(None), type({})] else when @@ -602,6 +601,9 @@ def predict(self, when={}, when_data = None, update_cached_model = False, use_gp heavy_transaction_metadata['model_when_conditions'] = when heavy_transaction_metadata['name'] = self.name + if backend is not None: + heavy_transaction_metadata['model_backend'] = backend + light_transaction_metadata = {} light_transaction_metadata['name'] = self.name light_transaction_metadata['type'] = transaction_type diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -90,9 +90,12 @@ def save_metadata(self): for k in null_out_fields: save_hmd[k] = None + for k in self.hmd: if k not in null_out_fields: save_hmd[k] = self.hmd[k] + if k == 'model_backend' and type(self.hmd['model_backend']) != type(str()): + save_hmd[k] = None try: with open(fn, 'wb') as fp: diff --git a/mindsdb/libs/phases/data_transformer/data_transformer.py b/mindsdb/libs/phases/data_transformer/data_transformer.py --- a/mindsdb/libs/phases/data_transformer/data_transformer.py +++ b/mindsdb/libs/phases/data_transformer/data_transformer.py @@ -107,7 +107,7 @@ def run(self, input_data, mode=None): if data_type == DATA_TYPES.CATEGORICAL: self._cast_all_data(input_data, column, 'category') - if self.transaction.lmd['model_backend'] == 'lightwood': + if self.transaction.hmd['model_backend'] == 'lightwood': if data_type == DATA_TYPES.DATE: self._aply_to_all_data(input_data, column, self._standardize_datetime) self._aply_to_all_data(input_data, column, self._lightwood_datetime_processing) @@ -130,7 +130,7 @@ def run(self, input_data, mode=None): min_val_occurances = 
min(occurance_map.values()) sum_val_occurances = sum(occurance_map.values()) - if self.transaction.lmd['model_backend'] in ('lightwood'): + if self.transaction.hmd['model_backend'] in ('lightwood'): lightwood_weight_map = {} for val in occurance_map: lightwood_weight_map[val] = 1 - occurance_map[val]/sum_val_occurances @@ -164,8 +164,9 @@ def run(self, input_data, mode=None): input_data.data_frame = input_data.data_frame.append(copied_rows_test) input_data.test_df = input_data.test_df.append(copied_rows_test) - input_data.data_frame = input_data.data_frame.append(copied_rows_validate) - input_data.validation_df = input_data.validation_df.append(copied_rows_validate) + if len(copied_rows_validate) > 0: + input_data.data_frame = input_data.data_frame.append(copied_rows_validate) + input_data.validation_df = input_data.validation_df.append(copied_rows_validate) copied_rows_train = [] copied_rows_test = [] @@ -179,8 +180,8 @@ def run(self, input_data, mode=None): data_frame_length = data_frame_length + 1 copied_row = valid_rows.iloc[ciclying_map[val]] - self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY].append(data_frame_length) - self.transaction.input_data.train_indexes[KEY_NO_GROUP_BY].append(data_frame_length) + self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY].append(data_frame_length - 1) + self.transaction.input_data.train_indexes[KEY_NO_GROUP_BY].append(data_frame_length - 1) copied_rows_train.append(copied_row) @@ -188,8 +189,8 @@ def run(self, input_data, mode=None): data_frame_length = data_frame_length + 1 copied_row = valid_rows.iloc[ciclying_map[val]] - self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY].append(data_frame_length) - self.transaction.input_data.test_indexes[KEY_NO_GROUP_BY].append(data_frame_length) + self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY].append(data_frame_length - 1) + self.transaction.input_data.test_indexes[KEY_NO_GROUP_BY].append(data_frame_length - 1) copied_rows_test.append(copied_row) @@ -197,8 +198,8 @@ def run(self, input_data, mode=None): data_frame_length = data_frame_length + 1 copied_row = valid_rows.iloc[ciclying_map[val]] - self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY].append(data_frame_length) - self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY].append(data_frame_length) + self.transaction.input_data.all_indexes[KEY_NO_GROUP_BY].append(data_frame_length - 1) + self.transaction.input_data.validation_indexes[KEY_NO_GROUP_BY].append(data_frame_length - 1) copied_rows_validate.append(copied_row) diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -64,12 +64,11 @@ def run(self): # create a vector that has True for each feature that was passed to the model tester and False if it was blanked features_existence = [True if np_col not in ignore_columns else False for np_col in input_columns] - # A separate probabilistic model is trained for each predicted column, we may want to change this in the future, @TODO + pv = {} for pcol in output_columns: for i in range(len(self.transaction.input_data.validation_df[pcol])): probabilistic_validators[pcol].register_observation(features_existence=features_existence, real_value=self.transaction.input_data.validation_df[pcol].iloc[i], predicted_value=ignore_col_predictions[pcol][i], hmd=self.transaction.hmd) 
probabilistic_validators[pcol].register_observation(features_existence=[True for col in input_columns], real_value=self.transaction.input_data.validation_df[pcol].iloc[i], predicted_value=normal_predictions[pcol][i], hmd=self.transaction.hmd) - self.transaction.lmd['accuracy_histogram'] = {} total_accuracy = 0 diff --git a/mindsdb/libs/phases/model_interface/model_interface.py b/mindsdb/libs/phases/model_interface/model_interface.py --- a/mindsdb/libs/phases/model_interface/model_interface.py +++ b/mindsdb/libs/phases/model_interface/model_interface.py @@ -19,10 +19,15 @@ def run(self, mode='train'): phase_name = PHASE_MODEL_INTERFACE - if self.transaction.lmd['model_backend'] == 'ludwig': + if self.transaction.hmd['model_backend'] == 'ludwig': self.transaction.model_backend = LudwigBackend(self.transaction) - if self.transaction.lmd['model_backend'] == 'lightwood': + elif self.transaction.hmd['model_backend'] == 'lightwood': self.transaction.model_backend = LightwoodBackend(self.transaction) + else: + self.transaction.model_backend = self.transaction.hmd['model_backend'] + + if hasattr(self.transaction.model_backend, 'set_transaction'): + self.transaction.model_backend.set_transaction(self.transaction) if mode == 'train': self.transaction.model_backend.train() diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,11 +32,17 @@ def remove_requirements(requirements, name, replace=None): # OSX specific requirements elif sys_platform == 'darwin': requirements = requirements + requirements = remove_requirements(requirements, 'tensorflow', 'tensorflow == 1.13.1') + requirements = remove_requirements(requirements, 'tensorflow-estimator', 'tensorflow-estimator == 1.13.0') + requirements = remove_requirements(requirements, 'ludwig', 'ludwig == 0.1.2') # Windows specific requirements elif sys_platform in ['win32','cygwin','windows']: requirements = ['cwrap',*requirements] requirements = remove_requirements(requirements, 'tensorflow-estimator') + requirements = remove_requirements(requirements, 'tensorflow', 'tensorflow == 1.13.1') + requirements = remove_requirements(requirements, 'ludwig', 'ludwig == 0.1.2') + requirements = remove_requirements(requirements, 'tensorflow-estimator') requirements = remove_requirements(requirements,'wheel', replace='wheel == 0.26.0') requirements = remove_requirements(requirements,'lightwood', replace='lightwood @ git+https://github.com/mindsdb/lightwood.git@master')
diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py --- a/tests/ci_tests/tests.py +++ b/tests/ci_tests/tests.py @@ -1,11 +1,12 @@ from mindsdb import Predictor import sys +import os def basic_test(backend='ludwig',use_gpu=True,ignore_columns=[], run_extra=False): if run_extra: - mdb = Predictor(name='metapredictor') - mdb.analyse_dataset(from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv") + for py_file in [x for x in os.listdir('../functional_testing') if '.py' in x]: + os.system(f'python3 ../functional_testing/{py_file}') # Create & Learn mdb = Predictor(name='home_rentals_price') diff --git a/tests/functional_testing/analyse_dataset.py b/tests/functional_testing/analyse_dataset.py new file mode 100644 --- /dev/null +++ b/tests/functional_testing/analyse_dataset.py @@ -0,0 +1,5 @@ +from mindsdb import Predictor + + +mdb = Predictor(name='analyse_dataset_test_predictor') +mdb.analyse_dataset(from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv") diff --git a/tests/functional_testing/custom_model.py b/tests/functional_testing/custom_model.py new file mode 100644 --- /dev/null +++ b/tests/functional_testing/custom_model.py @@ -0,0 +1,65 @@ +from mindsdb import Predictor +import pandas as pd +import numpy as np +from sklearn import tree +from sklearn import preprocessing + + +class CustomDTModel(): + def __init__(self): + self.clf = tree.DecisionTreeClassifier() + le = preprocessing.LabelEncoder() + + def set_transaction(self, transaction): + self.transaction = transaction + self.output_columns = self.transaction.lmd['predict_columns'] + self.input_columns = [x for x in self.transaction.lmd['columns'] if x not in self.output_columns] + self.train_df = self.transaction.input_data.train_df + self.test_dt = train_df = self.transaction.input_data.test_df + + + def train(self): + self.le_arr = {} + for col in [*self.output_columns, *self.input_columns]: + self.le_arr[col] = preprocessing.LabelEncoder() + self.le_arr[col].fit(self.transaction.input_data.data_frame[col]) + + X = [] + for col in self.input_columns: + X.append(self.le_arr[col].transform(self.transaction.input_data.train_df[col])) + + X = np.swapaxes(X,1,0) + + # Only works with one output column + Y = self.le_arr[self.output_columns[0]].transform(self.transaction.input_data.train_df[self.output_columns[0]]) + + self.clf.fit(X, Y) + + def predict(self, mode='predict', ignore_columns=[]): + if mode == 'predict': + df = self.transaction.input_data.data_frame + if mode == 'validate': + df = self.transaction.input_data.validation_df + elif mode == 'test': + df = self.transaction.input_data.test_df + + X = [] + for col in self.input_columns: + X.append(self.le_arr[col].transform(df[col])) + + X = np.swapaxes(X,1,0) + + predictions = self.clf.predict(X) + + formated_predictions = {self.output_columns[0]: predictions} + + return formated_predictions + + +predictor = Predictor(name='custom_model_test_predictor') + +dt_model = CustomDTModel() + +predictor.learn(to_predict='rental_price',from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", backend=dt_model) +predictions = predictor.predict(when_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", backend=dt_model) +print(predictions[25]) diff --git a/tests/integration_tests/generated_data_tests.py b/tests/integration_tests/generated_data_tests.py --- a/tests/integration_tests/generated_data_tests.py +++ b/tests/integration_tests/generated_data_tests.py @@ -38,7 
+38,7 @@ def setup_testing_logger(): logger.addHandler(handler) logger.setLevel(logging.DEBUG) -def test_timeseries(): +def test_timeseries(backend='lightwood'): logger.info('Starting timeseries test !') ts_hours = 12 separator = ',' @@ -94,7 +94,7 @@ def test_timeseries(): ,window_size=3 #,group_by = feature_headers[3] ,use_gpu=False - ,backend='lightwood' + ,backend=backend ) logger.info(f'--------------- Learning ran succesfully ---------------') except: @@ -135,7 +135,7 @@ def test_timeseries(): logger.info('Timeseries test ran succesfully !') -def test_one_label_prediction(): +def test_one_label_prediction(backend='lightwood'): logger.info('Starting one-label test') separator = ',' train_file_name = 'train_data.csv' @@ -177,7 +177,7 @@ def test_one_label_prediction(): try: - mdb.learn(from_data=train_file_name, to_predict=label_headers) + mdb.learn(from_data=train_file_name, to_predict=label_headers, backend=backend) logger.info(f'--------------- Learning ran succesfully ---------------') except: print(traceback.format_exc()) @@ -213,7 +213,7 @@ def test_one_label_prediction(): logger.info('One-label prediction test ran succesfully !') -def test_one_label_prediction_wo_strings(): +def test_one_label_prediction_wo_strings(backend='lightwood'): logger.info('Starting one-label test') separator = ',' train_file_name = 'train_data.csv' @@ -255,7 +255,7 @@ def test_one_label_prediction_wo_strings(): try: - mdb.learn(from_data=train_file_name, to_predict=label_headers) + mdb.learn(from_data=train_file_name, to_predict=label_headers, backend=backend) logger.info(f'--------------- Learning ran succesfully ---------------') except: print(traceback.format_exc()) @@ -291,7 +291,7 @@ def test_one_label_prediction_wo_strings(): logger.info('One-label prediction test ran succesfully !') -def test_multilabel_prediction(): +def test_multilabel_prediction(backend='lightwood'): logger.info('Starting multilabel prediction test') separator = ',' train_file_name = 'train_data.csv' @@ -336,7 +336,7 @@ def test_multilabel_prediction(): try: - mdb.learn(from_data=train_file_name, to_predict=label_headers) + mdb.learn(from_data=train_file_name, to_predict=label_headers, backend=backend) logger.info(f'--------------- Learning ran succesfully ---------------') except: print(traceback.format_exc()) diff --git a/tests/integration_tests/test_suite.py b/tests/integration_tests/test_suite.py --- a/tests/integration_tests/test_suite.py +++ b/tests/integration_tests/test_suite.py @@ -4,6 +4,13 @@ import os +# Run the generated data tests +for backend in ['ludwig', 'lightwood']: + test_one_label_prediction_wo_strings(backend) + test_timeseries(backend) + test_multilabel_prediction(backend) + test_one_label_prediction(backend) + # Run the CI tests os.system('cd ..; cd ci_tests; python3 full_test.py') @@ -42,11 +49,5 @@ print(f'Got accuracy of {acc} !') print('\n\n\n==================================\n\n\n') -# Run the generated data tests -test_one_label_prediction_wo_strings() -test_timeseries() -test_multilabel_prediction() -test_one_label_prediction() - #with multiprocessing.Pool(max(len(datasets),6)) as pool: # pool.map(run_example,datasets)
Issues with ludwig backend and large numbers The Ludwig backend seems to always return `nan` if fed only numerical values bigger than `2^20` as inputs. We don't want to do normalization inside MindsDB, so it's not clear how to address this yet; we should check whether it's an easy-to-fix issue in Ludwig, or whether it might already have been fixed. This is a follow-up to #137. Update ludwig version and see what bugs were fixed Update to 0.2.0 and see what modifications have been made. balance_target_category = True doesn't work for ludwig backend Setting `balance_target_category=True` in the `unstable_parameters_dict` causes the Ludwig backend to crash, probably due to the way the dataframe is generated in the Ludwig backend in MindsDB. ``` ERROR:mindsdb-logger-f7dcccb8-b861-11e9-9f27-fc7774b6bf48:libs/controllers/transaction.py:105 - Could not load module ModelInterface ERROR:mindsdb-logger-f7dcccb8-b861-11e9-9f27-fc7774b6bf48:libs/controllers/transaction.py:106 - Traceback (most recent call last): File "/home/george/mindsdb/mindsdb/libs/controllers/transaction.py", line 102, in _call_phase_module return module(self.session, self)(**kwargs) File "/home/george/mindsdb/mindsdb/libs/phases/base_module.py", line 54, in __call__ ret = self.run(**kwargs) File "/home/george/mindsdb/mindsdb/libs/phases/model_interface/model_interface.py", line 28, in run self.transaction.model_backend.train() File "/home/george/mindsdb/mindsdb/libs/backends/ludwig.py", line 354, in train training_dataframe, model_definition, timeseries_cols, has_heavy_data, col_map = self._create_ludwig_dataframe('train') File "/home/george/mindsdb/mindsdb/libs/backends/ludwig.py", line 275, in _create_ludwig_dataframe if type(self.transaction.input_data.data_frame[col][row_ind]) == str: File "/usr/lib/python3.7/site-packages/pandas/core/series.py", line 868, in __getitem__ result = self.index.get_value(self, key) File "/usr/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 4375, in get_value tz=getattr(series.dtype, 'tz', None)) File "pandas/_libs/index.pyx", line 81, in pandas._libs.index.IndexEngine.get_value File "pandas/_libs/index.pyx", line 89, in pandas._libs.index.IndexEngine.get_value File "pandas/_libs/index.pyx", line 127, in pandas._libs.index.IndexEngine.get_loc File "pandas/_libs/index.pyx", line 153, in pandas._libs.index.IndexEngine._get_loc_duplicates File "pandas/_libs/index_class_helper.pxi", line 122, in pandas._libs.index.Int64Engine._maybe_get_bool_indexer KeyError: 3500 ```
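The `KeyError: 3500` in the traceback above is a plain label-vs-position indexing problem, which is exactly what the patch's switch from `data_frame[col][row_ind]` to `data_frame[col].iloc[row_ind]` addresses: once target balancing appends duplicated rows, integer labels repeat and positions beyond the original length have no matching label. A minimal sketch with synthetic data (using `pd.concat` so it runs on modern pandas too):

```python
import pandas as pd

df = pd.DataFrame({'x': range(3000)})
df = pd.concat([df, df.iloc[:1000]])   # balancing duplicates rows; labels repeat
print(len(df))                         # 4000 positions...
try:
    df['x'][3500]                      # ...but no row carries the *label* 3500
except KeyError:
    print('KeyError: 3500')
print(df['x'].iloc[3500])              # positional lookup keeps working -> 500
```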
2019-09-19T16:55:11Z
[]
[]
mindsdb/mindsdb
319
mindsdb__mindsdb-319
[ "225" ]
19081936bac555a8e16d7a2db51ebf909f1590f7
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py --- a/mindsdb/__about__.py +++ b/mindsdb/__about__.py @@ -1,6 +1,6 @@ __title__ = 'MindsDB' __package_name__ = 'mindsdb' -__version__ = '1.7.0' +__version__ = '1.7.1' __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects." __email__ = "[email protected]" __author__ = 'MindsDB Inc' diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -4,6 +4,7 @@ import uuid import traceback import pickle +import shutil from mindsdb.libs.data_types.mindsdb_logger import MindsdbLogger from mindsdb.libs.helpers.multi_data_source import getDS @@ -404,6 +405,69 @@ def load_model(self, model_archive_path=None): """ self.load(model_archive_path) + def rename_model(self, old_model_name, new_model_name): + """ + If you want to export a model to a file + + :param old_model_name: this is the name of the model you wish to rename + :param new_model_name: this is the new name of the model + :return: bool (True/False) True if mind was exported successfully + """ + + if old_model_name == new_model_name: + return True + + moved_a_backend = False + for extension in ['_lightwood_data', '_ludwig_data']: + try: + shutil.move(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, old_model_name + extension), os.path.join(CONFIG.MINDSDB_STORAGE_PATH, new_model_name + extension)) + moved_a_backend = True + except: + pass + + if not moved_a_backend: + return False + + with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, old_model_name + '_light_model_metadata.pickle'), 'rb') as fp: + lmd =pickle.load(fp) + + with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, old_model_name + '_heavy_model_metadata.pickle'), 'rb') as fp: + hmd =pickle.load(fp) + + lmd['name'] = new_model_name + hmd['name'] = new_model_name + + renamed_one_backend = False + try: + lmd['ludwig_data']['ludwig_save_path'] = lmd['ludwig_data']['ludwig_save_path'].replace(old_model_name, new_model_name) + renamed_one_backend = True + except: + pass + + try: + lmd['lightwood_data']['save_path'] = lmd['lightwood_data']['save_path'].replace(old_model_name, new_model_name) + renamed_one_backend = True + except: + pass + + if not renamed_one_backend: + return False + + with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, new_model_name + '_light_model_metadata.pickle'), 'wb') as fp: + pickle.dump(lmd, fp,protocol=pickle.HIGHEST_PROTOCOL) + + with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, new_model_name + '_heavy_model_metadata.pickle'), 'wb') as fp: + pickle.dump(hmd, fp,protocol=pickle.HIGHEST_PROTOCOL) + + + + + + os.remove(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, old_model_name + '_light_model_metadata.pickle')) + os.remove(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, old_model_name + '_heavy_model_metadata.pickle')) + return True + + def delete_model(self, model_name): """ If you want to export a model to a file diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -270,9 +270,6 @@ def run(self): return if self.lmd['type'] == TRANSACTION_LEARN: - self.output_data.data_frame = [['Model ' + self.lmd['name'] + ' training.']] - self.output_data.columns = ['Status'] - if CONFIG.EXEC_LEARN_IN_THREAD == False: self._execute_learn() else:
diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py --- a/tests/ci_tests/tests.py +++ b/tests/ci_tests/tests.py @@ -13,7 +13,12 @@ def basic_test(backend='ludwig',use_gpu=True,ignore_columns=[], run_extra=False) mdb.learn(to_predict='rental_price',from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=20,use_gpu=use_gpu) # Reload & Predict - mdb = Predictor(name='home_rentals_price') + model_name = 'home_rentals_price' + if run_extra: + mdb.rename_model('home_rentals_price', 'home_rentals_price_renamed') + model_name = 'home_rentals_price_renamed' + + mdb = Predictor(name=model_name) prediction = mdb.predict(when={'sqft':300}, use_gpu=use_gpu) # Test all different forms of output @@ -41,6 +46,6 @@ def basic_test(backend='ludwig',use_gpu=True,ignore_columns=[], run_extra=False) print('\n\n') # See if we can get the adapted metadata - amd = mdb.get_model_data('home_rentals_price') + amd = mdb.get_model_data(model_name) # Make some simple assertions about it assert(5 < len(list(amd.keys())))
Rename functionality to the predictor interface Add a `rename` function to the predictor interface. It should take two arguments: `new_name` and `current_name`; `current_name` should default to `self.name` if not provided. This required changing the model name in a few places throughout the light and heavy metadata, as well as modifying the underlying metadata and learning backend files.
Why is the `current_name` argument needed in a `rename` function? In case you wish to rename a predictor other than the one being used right now... it could also be designed to take a single argument and assume the current name is always `self.name`. The design with two arguments came about because at some point I wanted to make all these 'administrative' functions separate from the predictor, and thus I didn't want to have them rely on `self`... but it can be done the other way, it doesn't matter much. Looking into adding this. I don't quite understand the logic behind having a class method for renaming other instances of that class, could you provide some context on why we might want to do this? True, it makes little sense, it should just be `@staticmethod`, and I'm fine with it being written as `@staticmethod`. If you want to know the reasoning for initially not wanting to make it static, it was as follows (quite boring and long): 1. * There are a bunch of standalone methods defined on the predictor already as normal methods (not static) which should really be static (e.g. list all models, load the data for a specific model, turn the model metadata into a JSON print-friendly format, etc.) 2. * These methods were not made static because they require one or two internal variables, which could themselves be created by static functions, but they aren't; they require 1 or 2 lines in the predictor initialization to compute 3. * These methods are used in some other places (mindsdb_server) on an instance of the predictor, rather than as static methods. So to migrate all the methods that fall into (1) to static I would have to fix (2) and (3)... which is annoying, not hard, but annoying, with loads of places to change stuff in; I want to get to it at some point, probably next week. Until then, I thought it would be more consistent to keep all this stand-alone functionality as methods, just for the sake of interface consistency. But I can live with it being static as well, either way works, it would end up as a separate function in the future anyway :) @George3d6 is this taken care of? If not then I can work on it Still needs doing, yes thanks @George3d6 I will look into it over the weekend @mohsinkhansymc Since no PR has come in I assume you didn't have the time to look into it. Thanks for wanting to help, but as it turns out this feature just became rather critical to implement, so I will be doing it myself in the next PR, so please don't start work on it, as it will likely be implemented by the time you finish.
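A minimal sketch of the interface shape discussed above, purely illustrative; the merged patch actually implements `rename_model(old_model_name, new_model_name)` by moving the backend artefacts on disk and rewriting the pickled light/heavy metadata:

```python
class Predictor:
    def __init__(self, name):
        self.name = name

    def rename(self, new_name, current_name=None):
        # `current_name` defaults to this predictor's own name, but can
        # point at any other stored model, per the discussion above.
        current_name = current_name or self.name
        # ... move storage files and rewrite metadata for `current_name` ...
        if current_name == self.name:
            self.name = new_name
        return True
```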
2019-10-26T17:49:44Z
[]
[]
mindsdb/mindsdb
329
mindsdb__mindsdb-329
[ "320", "322" ]
f8987924b6fa5d04b4624b5f75eed2c6a66d1edb
diff --git a/mindsdb/config/__init__.py b/mindsdb/config/__init__.py --- a/mindsdb/config/__init__.py +++ b/mindsdb/config/__init__.py @@ -29,4 +29,6 @@ class Config: # If logs should be streamed to a server SEND_LOGS = ifEnvElse('SEND_LOGS', False) + CHECK_FOR_UPDATES = True + CONFIG = Config() diff --git a/mindsdb/libs/constants/mindsdb.py b/mindsdb/libs/constants/mindsdb.py --- a/mindsdb/libs/constants/mindsdb.py +++ b/mindsdb/libs/constants/mindsdb.py @@ -59,11 +59,6 @@ class ORDER_BY_KEYS: COLUMN = 0 ASCENDING_VALUE = 1 -PHASE_DATA_EXTRACTOR = 1 -PHASE_STATS_GENERATOR = 2 -PHASE_MODEL_INTERFACE = 3 -PHASE_MODEL_ANALYZER = 4 - MODEL_STATUS_TRAINED = "Trained" MODEL_STATUS_PREPARING = "Preparing" MODEL_STATUS_DATA_ANALYSIS = "Data Analysis" diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -13,7 +13,7 @@ from mindsdb.config import CONFIG from mindsdb.libs.controllers.transaction import Transaction from mindsdb.libs.constants.mindsdb import * - +from mindsdb.libs.helpers.general_helpers import check_for_updates from pathlib import Path @@ -36,6 +36,9 @@ def __init__(self, name, root_folder=CONFIG.MINDSDB_STORAGE_PATH, log_level=CONF # initialize log self.log = MindsdbLogger(log_level=log_level, uuid=self.uuid) + if CONFIG.CHECK_FOR_UPDATES: + check_for_updates() + # set the mindsdb storage folder storage_ok = True # default state @@ -206,9 +209,10 @@ def _adapt_column(self, col_stats, col): return icm - def get_model_data(self, model_name): - with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, f'{model_name}_light_model_metadata.pickle'), 'rb') as fp: - lmd = pickle.load(fp) + def get_model_data(self, model_name, lmd=None): + if lmd is None: + with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, f'{model_name}_light_model_metadata.pickle'), 'rb') as fp: + lmd = pickle.load(fp) # ADAPTOR CODE amd = {} @@ -510,8 +514,10 @@ def analyse_dataset(self, from_data, sample_margin_of_error=CONFIG.DEFAULT_MARGI light_transaction_metadata['model_order_by'] = [] light_transaction_metadata['malformed_columns'] = [] light_transaction_metadata['data_preparation'] = {} + light_transaction_metadata['predict_columns'] = [] Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log) + return self.get_model_data(model_name=None, lmd=light_transaction_metadata) def learn(self, to_predict, from_data, test_from_data=None, group_by = None, window_size = None, order_by = [], sample_margin_of_error = CONFIG.DEFAULT_MARGIN_OF_ERROR, ignore_columns = [], stop_training_in_x_seconds = None, stop_training_in_accuracy = None, backend='lightwood', rebuild_model=True, use_gpu=False, disable_optional_analysis=False, equal_accuracy_for_all_output_categories=False, output_categories_importance_dictionary=None, unstable_parameters_dict={}): """ diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -168,6 +168,8 @@ def _execute_learn(self): self._call_phase_module(clean_exit=True, module_name='StatsGenerator', input_data=self.input_data, modify_light_metadata=True, hmd=self.hmd) self.save_metadata() + self._call_phase_module(clean_exit=True, module_name='DataSplitter') + self._call_phase_module(clean_exit=True, module_name='DataTransformer', input_data=self.input_data, 
mode='train') self.lmd['current_phase'] = MODEL_STATUS_TRAINING @@ -225,6 +227,7 @@ def _execute_predict(self): self.log.error('No input data provided !') return + self._call_phase_module(clean_exit=True, module_name='DataSplitter') # @TODO Maybe move to a separate "PredictionAnalysis" phase ? if self.lmd['run_confidence_variation_analysis']: diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -38,7 +38,7 @@ def check_for_updates(): fp.write(uuid_str) except: log.warning(f'Cannot store token, Please add write permissions to file: {uuid_file}') - uuid_str = f'{token}.NO_WRITE' + uuid_str = f'{uuid_str}.NO_WRITE' if Path(mdb_file).is_file(): token = open(mdb_file, 'r').read() @@ -51,11 +51,11 @@ def check_for_updates(): token = f'{token}.NO_WRITE' try: - ret = requests.get('http://mindsdb.com/updates/check/{token}'.format(token=token), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)}).json() + ret = requests.get('https://public.api.mindsdb.com/updates/check/{token}'.format(token=token), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)}).json() except: log.warning('Could not check for updates') return - + try: if 'version' in ret and ret['version']!= __version__: log.warning("There is a new version of MindsDB {version}, please do:\n pip3 uninstall mindsdb\n pip3 install mindsdb --user".format(version=ret['version'])) diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py --- a/mindsdb/libs/phases/data_extractor/data_extractor.py +++ b/mindsdb/libs/phases/data_extractor/data_extractor.py @@ -13,29 +13,24 @@ class DataExtractor(BaseModule): - - phase_name = PHASE_DATA_EXTRACTOR - def _get_data_frame_from_when_conditions(self): """ :return: """ - - columns = self.transaction.lmd['columns'] when_conditions = self.transaction.hmd['model_when_conditions'] when_conditions_list = [] # here we want to make a list of the type ( ValueForField1, ValueForField2,..., ValueForFieldN ), ... 
for when_condition in when_conditions: - cond_list = [None] * len(columns) # empty list with blanks for values + cond_list = [None] * len(self.transaction.lmd['columns']) # empty list with blanks for values for condition_col in when_condition: - col_index = columns.index(condition_col) + col_index = self.transaction.lmd['columns'].index(condition_col) cond_list[col_index] = when_condition[condition_col] when_conditions_list.append(cond_list) - result = pandas.DataFrame(when_conditions_list, columns=columns) + result = pandas.DataFrame(when_conditions_list, columns=self.transaction.lmd['columns']) return result @@ -141,102 +136,16 @@ def _validate_input_data_integrity(self): def run(self): + # --- Dataset gets randomized or sorted (if timeseries) --- # result = self._get_prepared_input_df() + # --- Dataset gets randomized or sorted (if timeseries) --- # + # --- Some information about the dataset gets transplanted into transaction level variables --- # self.transaction.input_data.columns = result.columns.values.tolist() self.transaction.lmd['columns'] = self.transaction.input_data.columns self.transaction.input_data.data_frame = result + # --- Some information about the dataset gets transplanted into transaction level variables --- # + # --- Some preliminary dataset integrity checks --- # self._validate_input_data_integrity() - - is_time_series = self.transaction.lmd['model_is_time_series'] - group_by = self.transaction.lmd['model_group_by'] - KEY_NO_GROUP_BY = '{PLEASE_DONT_TELL_ME_ANYONE_WOULD_CALL_A_COLUMN_THIS}##ALL_ROWS_NO_GROUP_BY##{PLEASE_DONT_TELL_ME_ANYONE_WOULD_CALL_A_COLUMN_THIS}' - - # create all indexes by group by, that is all the rows that belong to each group by - all_indexes = {} - train_indexes = {} - test_indexes = {} - validation_indexes = {} - - all_indexes[KEY_NO_GROUP_BY] = [] - train_indexes[KEY_NO_GROUP_BY] = [] - test_indexes[KEY_NO_GROUP_BY] = [] - validation_indexes[KEY_NO_GROUP_BY] = [] - for i, row in self.transaction.input_data.data_frame.iterrows(): - - if len(group_by) > 0: - group_by_value = '_'.join([str(row[group_by_index]) for group_by_index in [columns.index(group_by_column) for group_by_column in group_by]]) - - if group_by_value not in all_indexes: - all_indexes[group_by_value] = [] - - all_indexes[group_by_value] += [i] - - all_indexes[KEY_NO_GROUP_BY] += [i] - - # move indexes to corresponding train, test, validation, etc and trim input data accordingly - for key in all_indexes: - #If this is a group by, skip the `KEY_NO_GROUP_BY` key - if len(all_indexes) > 1 and key == KEY_NO_GROUP_BY: - continue - - length = len(all_indexes[key]) - if self.transaction.lmd['type'] == TRANSACTION_LEARN: - # this evals True if it should send the entire group data into test, train or validation as opposed to breaking the group into the subsets - should_split_by_group = type(group_by) == list and len(group_by) > 0 - - if should_split_by_group: - train_indexes[key] = all_indexes[key][0:round(length - length*CONFIG.TEST_TRAIN_RATIO)] - train_indexes[KEY_NO_GROUP_BY].extend(train_indexes[key]) - - test_indexes[key] = all_indexes[key][round(length - length*CONFIG.TEST_TRAIN_RATIO):int(round(length - length*CONFIG.TEST_TRAIN_RATIO) + round(length*CONFIG.TEST_TRAIN_RATIO/2))] - test_indexes[KEY_NO_GROUP_BY].extend(test_indexes[key]) - - validation_indexes[key] = all_indexes[key][(round(length - length*CONFIG.TEST_TRAIN_RATIO) + round(length*CONFIG.TEST_TRAIN_RATIO/2)):] - validation_indexes[KEY_NO_GROUP_BY].extend(validation_indexes[key]) - - else: - # make sure that the 
last in the time series are also the subset used for test - train_window = (0,int(length*(1-2*CONFIG.TEST_TRAIN_RATIO))) - train_indexes[key] = all_indexes[key][train_window[0]:train_window[1]] - validation_window = (train_window[1],train_window[1] + int(length*CONFIG.TEST_TRAIN_RATIO)) - test_window = (validation_window[1],length) - test_indexes[key] = all_indexes[key][test_window[0]:test_window[1]] - validation_indexes[key] = all_indexes[key][validation_window[0]:validation_window[1]] - - self.transaction.input_data.train_df = self.transaction.input_data.data_frame.iloc[train_indexes[KEY_NO_GROUP_BY]].copy() - self.transaction.input_data.test_df = self.transaction.input_data.data_frame.iloc[test_indexes[KEY_NO_GROUP_BY]].copy() - self.transaction.input_data.validation_df = self.transaction.input_data.data_frame.iloc[validation_indexes[KEY_NO_GROUP_BY]].copy() - - self.transaction.lmd['data_preparation']['test_row_count'] = len(self.transaction.input_data.test_df) - self.transaction.lmd['data_preparation']['train_row_count'] = len(self.transaction.input_data.train_df) - self.transaction.lmd['data_preparation']['validation_row_count'] = len(self.transaction.input_data.validation_df) - - # @TODO: Consider deleting self.transaction.input_data.data_frame here - - # log some stats - if self.transaction.lmd['type'] == TRANSACTION_LEARN: - # @TODO I don't think the above works, fix at some point or just remove `sample_margin_of_error` option from the interface - if len(self.transaction.input_data.data_frame) != sum([len(self.transaction.input_data.train_df),len(self.transaction.input_data.test_df),len(self.transaction.input_data.validation_df)]): - self.log.info('You requested to sample with a *margin of error* of {sample_margin_of_error} and a *confidence level* of {sample_confidence_level}. 
Therefore:'.format(sample_confidence_level=self.transaction.lmd['sample_confidence_level'], sample_margin_of_error= self.transaction.lmd['sample_margin_of_error'])) - self.log.info('Using a [Cochran\'s sample size calculator](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/find-sample-size/) we got the following sample sizes:') - data = { - 'total': [total_rows_in_input, 'Total number of rows in input'], - 'subsets': [[total_rows_used, 'Total number of rows used']], - 'label': 'Sample size for margin of error of ({sample_margin_of_error}) and a confidence level of ({sample_confidence_level})'.format(sample_confidence_level=self.transaction.lmd['sample_confidence_level'], sample_margin_of_error= self.transaction.lmd['sample_margin_of_error']) - } - self.log.infoChart(data, type='pie') - # @TODO Bad code ends here (see @TODO above) - - data = { - 'subsets': [ - [len(self.transaction.input_data.train_df), 'Train'], - [len(self.transaction.input_data.test_df), 'Test'], - [len(self.transaction.input_data.validation_df), 'Validation'] - ], - 'label': 'Number of rows per subset' - } - - self.log.info('We have split the input data into:') - self.log.infoChart(data, type='pie') + # --- Some preliminary dataset integrity checks --- # diff --git a/mindsdb/libs/phases/data_splitter/__init__.py b/mindsdb/libs/phases/data_splitter/__init__.py new file mode 100644 diff --git a/mindsdb/libs/phases/data_splitter/data_splitter.py b/mindsdb/libs/phases/data_splitter/data_splitter.py new file mode 100644 --- /dev/null +++ b/mindsdb/libs/phases/data_splitter/data_splitter.py @@ -0,0 +1,116 @@ +from mindsdb.config import CONFIG +from mindsdb.libs.constants.mindsdb import * +from mindsdb.libs.phases.base_module import BaseModule +from mindsdb.libs.data_types.mindsdb_logger import log +from mindsdb.libs.helpers.text_helpers import hashtext +from mindsdb.external_libs.stats import calculate_sample_size +from pandas.api.types import is_numeric_dtype + +import random +import traceback +import pandas +import numpy as np +from mindsdb.libs.constants.mindsdb import * + +class DataSplitter(BaseModule): + def run(self): + group_by = self.transaction.lmd['model_group_by'] + if group_by is None or len(group_by) == 0: + group_by = [] + for col in self.transaction.lmd['predict_columns']: + if self.transaction.lmd['column_stats'][col]['data_type'] == DATA_TYPES.CATEGORICAL: + group_by.append(col) + if len(group_by) > 0: + self.transaction.input_data.data_frame = self.transaction.input_data.data_frame.sort_values(group_by) + + KEY_NO_GROUP_BY = '{PLEASE_DONT_TELL_ME_ANYONE_WOULD_CALL_A_COLUMN_THIS}##ALL_ROWS_NO_GROUP_BY##{PLEASE_DONT_TELL_ME_ANYONE_WOULD_CALL_A_COLUMN_THIS}' + + # create all indexes by group by, that is all the rows that belong to each group by + all_indexes = {} + train_indexes = {} + test_indexes = {} + validation_indexes = {} + + all_indexes[KEY_NO_GROUP_BY] = [] + train_indexes[KEY_NO_GROUP_BY] = [] + test_indexes[KEY_NO_GROUP_BY] = [] + validation_indexes[KEY_NO_GROUP_BY] = [] + for i, row in self.transaction.input_data.data_frame.iterrows(): + + if len(group_by) > 0: + group_by_value = '_'.join([str(row[group_by_index]) for group_by_index in [self.transaction.input_data.columns.index(group_by_col) for group_by_col in group_by]]) + + if group_by_value not in all_indexes: + all_indexes[group_by_value] = [] + + all_indexes[group_by_value] += [i] + + all_indexes[KEY_NO_GROUP_BY] += [i] + + # move indexes to corresponding train, test, validation, etc and trim input data 
accordingly + if self.transaction.lmd['type'] == TRANSACTION_LEARN: + for key in all_indexes: + should_split_by_group = type(group_by) == list and len(group_by) > 0 + + #If this is a group by, skip the `KEY_NO_GROUP_BY` key + if should_split_by_group and key == KEY_NO_GROUP_BY: + continue + + length = len(all_indexes[key]) + # this evals True if it should send the entire group data into test, train or validation as opposed to breaking the group into the subsets + if should_split_by_group: + train_indexes[key] = all_indexes[key][0:round(length - length*CONFIG.TEST_TRAIN_RATIO)] + train_indexes[KEY_NO_GROUP_BY].extend(train_indexes[key]) + + test_indexes[key] = all_indexes[key][round(length - length*CONFIG.TEST_TRAIN_RATIO):int(round(length - length*CONFIG.TEST_TRAIN_RATIO) + round(length*CONFIG.TEST_TRAIN_RATIO/2))] + test_indexes[KEY_NO_GROUP_BY].extend(test_indexes[key]) + + validation_indexes[key] = all_indexes[key][(round(length - length*CONFIG.TEST_TRAIN_RATIO) + round(length*CONFIG.TEST_TRAIN_RATIO/2)):] + validation_indexes[KEY_NO_GROUP_BY].extend(validation_indexes[key]) + + else: + # make sure that the last in the time series are also the subset used for test + + train_window = (0,int(length*(1-2*CONFIG.TEST_TRAIN_RATIO))) + train_indexes[key] = all_indexes[key][train_window[0]:train_window[1]] + validation_window = (train_window[1],train_window[1] + int(length*CONFIG.TEST_TRAIN_RATIO)) + test_window = (validation_window[1],length) + test_indexes[key] = all_indexes[key][test_window[0]:test_window[1]] + validation_indexes[key] = all_indexes[key][validation_window[0]:validation_window[1]] + + self.transaction.input_data.train_df = self.transaction.input_data.data_frame.iloc[train_indexes[KEY_NO_GROUP_BY]].copy() + self.transaction.input_data.test_df = self.transaction.input_data.data_frame.iloc[test_indexes[KEY_NO_GROUP_BY]].copy() + self.transaction.input_data.validation_df = self.transaction.input_data.data_frame.iloc[validation_indexes[KEY_NO_GROUP_BY]].copy() + + self.transaction.lmd['data_preparation']['test_row_count'] = len(self.transaction.input_data.test_df) + self.transaction.lmd['data_preparation']['train_row_count'] = len(self.transaction.input_data.train_df) + self.transaction.lmd['data_preparation']['validation_row_count'] = len(self.transaction.input_data.validation_df) + + # @TODO: Consider deleting self.transaction.input_data.data_frame here + + # log some stats + if self.transaction.lmd['type'] == TRANSACTION_LEARN: + # @TODO I don't think the above works, fix at some point or just remove `sample_margin_of_error` option from the interface + if len(self.transaction.input_data.data_frame) != sum([len(self.transaction.input_data.train_df),len(self.transaction.input_data.test_df),len(self.transaction.input_data.validation_df)]): + self.log.info('You requested to sample with a *margin of error* of {sample_margin_of_error} and a *confidence level* of {sample_confidence_level}. 
Therefore:'.format(sample_confidence_level=self.transaction.lmd['sample_confidence_level'], sample_margin_of_error= self.transaction.lmd['sample_margin_of_error'])) + self.log.info('Using a [Cochran\'s sample size calculator](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/find-sample-size/) we got the following sample sizes:') + data = { + 'total': [total_rows_in_input, 'Total number of rows in input'], + 'subsets': [[total_rows_used, 'Total number of rows used']], + 'label': 'Sample size for margin of error of ({sample_margin_of_error}) and a confidence level of ({sample_confidence_level})'.format(sample_confidence_level=self.transaction.lmd['sample_confidence_level'], sample_margin_of_error= self.transaction.lmd['sample_margin_of_error']) + } + self.log.infoChart(data, type='pie') + # @TODO Bad code ends here (see @TODO above) + + data = { + 'subsets': [ + [len(self.transaction.input_data.train_df), 'Train'], + [len(self.transaction.input_data.test_df), 'Test'], + [len(self.transaction.input_data.validation_df), 'Validation'] + ], + 'label': 'Number of rows per subset' + } + + self.log.info('We have split the input data into:') + self.log.infoChart(data, type='pie') + # --- Dataset split into train/test/validate --- # diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -8,9 +8,6 @@ import numpy as np class ModelAnalyzer(BaseModule): - - phase_name = PHASE_MODEL_ANALYZER - def run(self): """ # Runs the model on the validation set in order to fit a probabilistic model that will evaluate the accuracy of future predictions diff --git a/mindsdb/libs/phases/model_interface/model_interface.py b/mindsdb/libs/phases/model_interface/model_interface.py --- a/mindsdb/libs/phases/model_interface/model_interface.py +++ b/mindsdb/libs/phases/model_interface/model_interface.py @@ -16,9 +16,6 @@ def run(self, mode='train'): from mindsdb.libs.backends.lightwood import LightwoodBackend except ImportError as e: self.transaction.log.warning(e) - - phase_name = PHASE_MODEL_INTERFACE - if self.transaction.hmd['model_backend'] == 'ludwig': self.transaction.model_backend = LudwigBackend(self.transaction) elif self.transaction.hmd['model_backend'] == 'lightwood': diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -30,9 +30,6 @@ class StatsGenerator(BaseModule): # Additionally, the stats generator also provides the user with some extra meaningful information about his data, though this functionality may be moved to a different step (after vectorization) in the future """ - - phase_name = PHASE_STATS_GENERATOR - def _get_file_type(self, potential_path): could_be_fp = False for char in ('/', '\\', ':\\'):
diff --git a/test.py b/test.py --- a/test.py +++ b/test.py @@ -5,7 +5,7 @@ mdb = Predictor(name='test_predictor') -''' + mdb.learn(to_predict=['rental_price', 'location'],from_data="https://mindsdb-example-data.s3.eu-west-2.amazonaws.com/home_rentals.csv",use_gpu=True,stop_training_in_x_seconds=30, backend='ludwig') p_arr = mdb.predict(when_data='https://mindsdb-example-data.s3.eu-west-2.amazonaws.com/home_rentals.csv') @@ -13,8 +13,8 @@ exp_s = p.epitomize() #exp = p.explain() #print(exp) - print(exp_s) + #print(exp_s) ''' print(mdb.predict(when={'number_of_rooms': 3, 'number_of_bathrooms': 2, 'neighborhood': 'south_side', 'sqft':2411}, run_confidence_variation_analysis=True)[0].explain()) - +''' #print(json.dumps(mdb.get_model_data('test_predictor'))) diff --git a/tests/functional_testing/analyse_dataset.py b/tests/functional_testing/analyse_dataset.py --- a/tests/functional_testing/analyse_dataset.py +++ b/tests/functional_testing/analyse_dataset.py @@ -2,4 +2,6 @@ mdb = Predictor(name='analyse_dataset_test_predictor') -mdb.analyse_dataset(from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv") +results = mdb.analyse_dataset(from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv") +print('\n\n\n\n========================\n\n') +print(results)
Return a server parsable object from analyse_dataset The `analyse_dataset` functionality is currently useful only for printing CLI logs; we need it to also be usable by the server/GUI (this can be accomplished by returning a JSON-like object similar to the one we get when analyzing a predictor). It needs to be able to return and parse the resulting metadata; most of the code for this should already be there, I just need to separate it so I can re-use it here. Install fails without certain dev libraries installed on various OSes Some dependency is pulling in `gmpy`, which has a broken deployment on PyPI that fails to install without certain dev libraries. We need to figure out why this is happening and remove the library, or find a way to install gmpy without them. Or at least, if nothing works, warn the user about this.
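For the first issue, the accompanying patch makes `analyse_dataset` return the same adapted-metadata dict that `get_model_data` produces. A minimal usage sketch, mirroring `tests/functional_testing/analyse_dataset.py` from the test patch; the JSON dump is added here only for illustration:

```python
import json
from mindsdb import Predictor

mdb = Predictor(name='analyse_dataset_test_predictor')
results = mdb.analyse_dataset(
    from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv")

# The returned adapted metadata is a plain dict, so a server/GUI can consume it as JSON.
print(json.dumps(results, indent=2, default=str))
```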
tried an alternate way of installing gmpy2, check the following logs: running install running bdist_egg running egg_info creating gmpy2.egg-info writing gmpy2.egg-info/PKG-INFO writing dependency_links to gmpy2.egg-info/dependency_links.txt writing top-level names to gmpy2.egg-info/top_level.txt writing manifest file 'gmpy2.egg-info/SOURCES.txt' reading manifest file 'gmpy2.egg-info/SOURCES.txt' writing manifest file 'gmpy2.egg-info/SOURCES.txt' installing library code to build/bdist.linux-x86_64/egg running install_lib running build_ext building 'gmpy2.gmpy2' extension creating build creating build/temp.linux-x86_64-3.6 creating build/temp.linux-x86_64-3.6/src x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I./src -I/usr/include/python3.6m -c src/gmpy2.c -o build/temp.linux-x86_64-3.6/src/gmpy2.o -DSHARED=1 x86_64-linux-gnu-gcc: error: src/gmpy2.c: No such file or directory x86_64-linux-gnu-gcc: fatal error: no input files compilation terminated. error: command 'x86_64-linux-gnu-gcc' failed with exit status 1
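One way to "warn the user" as the issue suggests is a guarded import at startup; this is purely an illustrative sketch, not necessarily the approach the maintainers took:

```python
import logging

log = logging.getLogger('mindsdb')

try:
    import gmpy2  # optional; building its sdist requires GMP/MPFR/MPC development headers
except ImportError:
    gmpy2 = None
    log.warning("gmpy2 is unavailable. If `pip install` failed while compiling it, "
                "install your OS's libgmp/libmpfr/libmpc dev packages and retry.")
```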
2019-11-08T16:01:22Z
[]
[]
mindsdb/mindsdb
337
mindsdb__mindsdb-337
[ "324" ]
6906a58f7cd12dbe7b3c321679e01f06766b6306
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py --- a/mindsdb/__about__.py +++ b/mindsdb/__about__.py @@ -1,6 +1,6 @@ __title__ = 'MindsDB' __package_name__ = 'mindsdb' -__version__ = '1.7.11' +__version__ = '1.7.12' __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects." __email__ = "[email protected]" __author__ = 'MindsDB Inc'
diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py --- a/tests/ci_tests/tests.py +++ b/tests/ci_tests/tests.py @@ -10,8 +10,9 @@ def basic_test(backend='ludwig',use_gpu=True,ignore_columns=[], run_extra=False, os.system(f'python3 ../functional_testing/{py_file}') # Create & Learn + to_predict = 'rental_price' mdb = mindsdb.Predictor(name='home_rentals_price') - mdb.learn(to_predict='rental_price',from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=20,use_gpu=use_gpu) + mdb.learn(to_predict=to_predict,from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=20,use_gpu=use_gpu) # Reload & Predict model_name = 'home_rentals_price' @@ -49,4 +50,30 @@ def basic_test(backend='ludwig',use_gpu=True,ignore_columns=[], run_extra=False, # See if we can get the adapted metadata amd = mdb.get_model_data(model_name) # Make some simple assertions about it - assert(5 < len(list(amd.keys()))) + + # @TODO: Sometimes are None, not sure why: [, validation_set_accuracy, accuracy] + for k in ['status', 'name', 'version', 'data_source', 'current_phase', 'updated_at', 'created_at', 'train_end_at']: + assert(type(amd[k]) == str) + assert(type(amd['predict']) == list or type(amd['predict']) == str) + assert(type(amd['is_active']) == bool) + + for k in ['validation_set_accuracy', 'accuracy']: + assert(type(amd[k]) == float) + + for k in amd['data_preparation']: + assert(type(amd['data_preparation'][k]) == int or type(amd['data_preparation'][k]) == float) + + + for k in amd['data_analysis']: + assert(len(amd['data_analysis'][k]) > 0) + assert(type(amd['data_analysis'][k][0]) == dict) + + assert(len(amd['model_analysis']) > 0) + assert(type(amd['model_analysis'][0]) == dict) + + for k in amd['force_vectors'][to_predict]['normal_data_distribution']: + assert(len(amd['force_vectors'][to_predict]['normal_data_distribution'][k]) > 0) + + for k in amd['force_vectors'][to_predict]['missing_data_distribution']: + for sk in amd['force_vectors'][to_predict]['missing_data_distribution'][k]: + assert(len(amd['force_vectors'][to_predict]['missing_data_distribution'][k][sk]) > 0)
Add consistency check for model data object in the CI tests We should take a snapshot of a "correct" model data object (as returned by `get_model_data()`) for the CI testing dataset and always compare the json-parsed outputs of `get_model_data` against it. This might be a bit tedious to update every time we change what `get_model_data` returns, but otherwise there are certain situations where we can get silent failures, since certain parts of the Model Analyzer have fairly general exception catching built in (some data simply can't be produced in various situations) and `get_model_data` itself is made to work with incomplete data in order to return this data during training. If we want to be really scrupulous we should check the model data object at each phase of training, but I think that'd be overdoing it.
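A minimal sketch of the snapshot idea: keep a known-good `get_model_data()` output as JSON and compare key structure and value types rather than exact values, so legitimately varying numbers don't fail the check. The snapshot path and helper names are hypothetical:

```python
import json

SNAPSHOT_PATH = 'tests/ci_tests/model_data_snapshot.json'  # hypothetical location

def assert_same_structure(expected, actual, path='root'):
    """Recursively compare dict keys and value types, ignoring the values themselves."""
    assert type(actual) == type(expected), f'{path}: {type(actual)} vs {type(expected)}'
    if isinstance(expected, dict):
        missing = set(expected) - set(actual)
        assert not missing, f'{path}: missing keys {missing}'
        for k in expected:
            assert_same_structure(expected[k], actual[k], f'{path}.{k}')
    elif isinstance(expected, list) and expected and actual:
        # Spot-check the first element; list contents are assumed homogeneous here.
        assert_same_structure(expected[0], actual[0], f'{path}[0]')

def check_model_data_snapshot(amd):
    with open(SNAPSHOT_PATH) as fp:
        snapshot = json.load(fp)
    # Round-trip through JSON so both sides use the same primitive types.
    assert_same_structure(snapshot, json.loads(json.dumps(amd, default=str)))
```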
2019-11-19T13:43:19Z
[]
[]
mindsdb/mindsdb
371
mindsdb__mindsdb-371
[ "364" ]
ba9868c76c12be3b7836a94e95b5a2c70b228700
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py --- a/mindsdb/__about__.py +++ b/mindsdb/__about__.py @@ -1,6 +1,6 @@ __title__ = 'MindsDB' __package_name__ = 'mindsdb' -__version__ = '1.9.3' +__version__ = '1.9.5' __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects." __email__ = "[email protected]" __author__ = 'MindsDB Inc' diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -391,14 +391,36 @@ def export_model(self, model_name): print(e) return False - def load(self, mindsdb_storage_dir): + def load(self, model_archive_path): """ If you want to import a mindsdb instance storage from a file :param model_archive_path: full path to your mindsdb predictor zip file :return: bool (True/False) True if mind was imported successfully """ - shutil.unpack_archive(mindsdb_storage_dir, extract_dir=CONFIG.MINDSDB_STORAGE_PATH) + previous_models = os.listdir(CONFIG.MINDSDB_STORAGE_PATH) + shutil.unpack_archive(model_archive_path, extract_dir=CONFIG.MINDSDB_STORAGE_PATH) + + new_model_files = set(os.listdir(CONFIG.MINDSDB_STORAGE_PATH)) - set(previous_models) + model_names = [] + for file in new_model_files: + if '_light_model_metadata.pickle' in file: + model_name = file.replace('_light_model_metadata.pickle', '') + model_names.append(model_name) + + for model_name in model_names: + with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, model_name + '_light_model_metadata.pickle'), 'rb') as fp: + lmd = pickle.load(fp) + + if 'ludwig_data' in lmd and 'ludwig_save_path' in lmd['ludwig_data']: + lmd['ludwig_data']['ludwig_save_path'] = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, os.path.basename(lmd['ludwig_data']['ludwig_save_path'])) + + if 'lightwood_data' in lmd and 'save_path' in lmd['lightwood_data']: + lmd['lightwood_data']['save_path'] = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, os.path.basename(lmd['lightwood_data']['save_path'])) + + with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, model_name + '_light_model_metadata.pickle'), 'wb') as fp: + pickle.dump(lmd, fp, protocol=pickle.HIGHEST_PROTOCOL) def load_model(self, model_archive_path=None):
diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py --- a/tests/ci_tests/tests.py +++ b/tests/ci_tests/tests.py @@ -62,7 +62,7 @@ def basic_test(backend='lightwood',use_gpu=True,ignore_columns=[], run_extra=Fal # Create & Learn to_predict = 'rental_price' mdb = mindsdb.Predictor(name='home_rentals_price') - mdb.learn(to_predict=to_predict,from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=20,use_gpu=use_gpu) + mdb.learn(to_predict=to_predict,from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=10,use_gpu=use_gpu) # Reload & Predict model_name = 'home_rentals_price'
Predicting using an imported model from another machine is giving an error **Your Environment** * Mindsdb version you tried to install: 1.9.2 **Describe the bug** I trained a model on an AWS machine and imported it on my local machine. Trying to predict any values then gives the error below. **To Reproduce** Steps to reproduce the behavior, for example: 1. Train a model on machine A and export the model from machine A 2. Load the model on a different machine, let's say B 3. Try to predict the target variable **Expected behavior** It should predict the values without error. **Additional context** Please find the attached screenshot. <img width="890" alt="export_model_prediction" src="https://user-images.githubusercontent.com/27191684/71776038-554bc500-2fb0-11ea-9467-1858403b2ff8.PNG">
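The root cause is that the pickled metadata still points at the exporting machine's storage paths; the patch rewrites them after unpacking. A cleaned-up sketch of that step, assuming the same metadata keys the patch touches and a local storage path constant:

```python
import os
import pickle

STORAGE_PATH = '/tmp/mindsdb_storage'  # stand-in for the local CONFIG.MINDSDB_STORAGE_PATH

def relocate_model_paths(model_name):
    """Re-point saved backend paths at the local storage folder after an import."""
    lmd_file = os.path.join(STORAGE_PATH, model_name + '_light_model_metadata.pickle')
    with open(lmd_file, 'rb') as fp:
        lmd = pickle.load(fp)

    for section, key in (('ludwig_data', 'ludwig_save_path'),
                         ('lightwood_data', 'save_path')):
        if section in lmd and key in lmd[section]:
            # Keep only the file name from the old machine, prepend the local folder.
            lmd[section][key] = os.path.join(STORAGE_PATH,
                                             os.path.basename(lmd[section][key]))

    with open(lmd_file, 'wb') as fp:
        pickle.dump(lmd, fp, protocol=pickle.HIGHEST_PROTOCOL)
```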
2020-01-07T21:40:49Z
[]
[]
mindsdb/mindsdb
380
mindsdb__mindsdb-380
[ "365" ]
90d6ce25579b1100ca4dd1bf20329f038df4c6b5
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py --- a/mindsdb/__about__.py +++ b/mindsdb/__about__.py @@ -1,6 +1,6 @@ __title__ = 'MindsDB' __package_name__ = 'mindsdb' -__version__ = '1.10.0' +__version__ = '1.10.1' __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects." __email__ = "[email protected]" __author__ = 'MindsDB Inc' diff --git a/mindsdb/libs/backends/lightwood.py b/mindsdb/libs/backends/lightwood.py --- a/mindsdb/libs/backends/lightwood.py +++ b/mindsdb/libs/backends/lightwood.py @@ -18,7 +18,7 @@ def __init__(self, transaction): def _get_group_by_key(self, group_by, row): gb_lookup_key = '!!@@!!' for column in group_by: - gb_lookup_key += column + '_' + row[column] + '!!@@!!' + gb_lookup_key += f'{column}_{row[column]}_!!@@!!' return gb_lookup_key def _create_timeseries_df(self, original_df): @@ -186,7 +186,6 @@ def train(self): if eval_every_x_epochs < 3: eval_every_x_epochs = 3 - if self.transaction.lmd['stop_training_in_x_seconds'] is None: self.predictor.learn(from_data=train_df, test_data=test_df, callback_on_iter=self.callback_on_iter, eval_every_x_epochs=eval_every_x_epochs) else: diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py --- a/mindsdb/libs/controllers/predictor.py +++ b/mindsdb/libs/controllers/predictor.py @@ -197,7 +197,10 @@ def _adapt_column(self, col_stats, col): return icm - def get_model_data(self, model_name, lmd=None): + def get_model_data(self, model_name=None, lmd=None): + if model_name is None: + model_name = self.name + if lmd is None: with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, f'{model_name}_light_model_metadata.pickle'), 'rb') as fp: lmd = pickle.load(fp) @@ -347,7 +350,7 @@ def export(self, mindsdb_storage_dir='mindsdb_storage'): except: return False - def export_model(self, model_name): + def export_model(self, model_name=None): """ If you want to export a model to a file @@ -483,7 +486,7 @@ def rename_model(self, old_model_name, new_model_name): return True - def delete_model(self, model_name): + def delete_model(self, model_name=None): """ If you want to export a model to a file diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -165,7 +165,6 @@ def evaluate_accuracy(predictions, full_dataset, col_stats, output_columns, hmd= score = 0.00000001 return score - class suppress_stdout_stderr(object): def __init__(self): try: diff --git a/mindsdb/libs/helpers/probabilistic_validator.py b/mindsdb/libs/helpers/probabilistic_validator.py --- a/mindsdb/libs/helpers/probabilistic_validator.py +++ b/mindsdb/libs/helpers/probabilistic_validator.py @@ -170,10 +170,18 @@ def get_confusion_matrix(self): labels= list(set(self._original_real_buckets_buff)) matrix = confusion_matrix(self._original_real_buckets_buff, self._original_predicted_buckets_buff, labels=labels) + + value_labels = [] + for label in labels: + try: + value_labels.append(str(self.buckets[label])) + except: + value_labels.append('UNKNOWN') + confusion_matrix_obj = { 'matrix': [[int(y) for y in x] for x in matrix], - 'predicted': [str(self.buckets[x]) for x in labels], - 'real': [str(self.buckets[x]) for x in labels] + 'predicted': value_labels, + 'real': value_labels } return confusion_matrix_obj diff --git a/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py 
b/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py --- a/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py +++ b/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py @@ -21,7 +21,8 @@ def get_column_importance(self, model, output_columns, input_columns, full_datas columnless_prediction_distribution = {} all_columns_prediction_distribution = {} - normal_predictions = model.predict('validate') + with disable_console_output(True): + normal_predictions = model.predict('validate') normal_accuracy = evaluate_accuracy(normal_predictions, full_dataset, stats, output_columns) column_importance_dict = {} buckets_stats = {} @@ -42,12 +43,14 @@ def get_column_importance(self, model, output_columns, input_columns, full_datas for input_column in ignorable_input_columns: # See what happens with the accuracy of the outputs if only this column is present ignore_columns = [col for col in ignorable_input_columns if col != input_column] - col_only_predictions = model.predict('validate', ignore_columns) + with disable_console_output(True): + col_only_predictions = model.predict('validate', ignore_columns) col_only_accuracy = evaluate_accuracy(col_only_predictions, full_dataset, stats, output_columns) # See what happens with the accuracy if all columns but this one are present ignore_columns = [input_column] - col_missing_predictions = model.predict('validate', ignore_columns) + with disable_console_output(True): + col_missing_predictions = model.predict('validate', ignore_columns) col_missing_accuracy = evaluate_accuracy(col_missing_predictions, full_dataset, stats, output_columns) combined_column_accuracy = ((normal_accuracy - col_missing_accuracy) + col_only_accuracy)/2 @@ -69,7 +72,6 @@ def get_column_importance(self, model, output_columns, input_columns, full_datas columnless_prediction_distribution[output_column][input_column] = col_missing_output_histogram # @TODO should be go back to generating this information based on the buckets of the input columns ? 
Or just keep doing the stats generation for the input columns based on the indexes of the buckets for the output column - for output_column in output_columns: buckets_stats[output_column] = {} diff --git a/mindsdb/libs/phases/model_analyzer/model_analyzer.py b/mindsdb/libs/phases/model_analyzer/model_analyzer.py --- a/mindsdb/libs/phases/model_analyzer/model_analyzer.py +++ b/mindsdb/libs/phases/model_analyzer/model_analyzer.py @@ -1,4 +1,4 @@ -from mindsdb.libs.helpers.general_helpers import pickle_obj +from mindsdb.libs.helpers.general_helpers import pickle_obj, disable_console_output from mindsdb.libs.constants.mindsdb import * from mindsdb.libs.phases.base_module import BaseModule from mindsdb.libs.helpers.probabilistic_validator import ProbabilisticValidator @@ -41,7 +41,8 @@ def run(self): if self.transaction.lmd['column_stats'][input_column]['data_type'] != DATA_TYPES.FILE_PATH and input_column not in [x[0] for x in self.transaction.lmd['model_order_by']]: ignorable_input_columns.append(input_column) - normal_predictions = self.transaction.model_backend.predict('validate') + with disable_console_output(): + normal_predictions = self.transaction.model_backend.predict('validate') # Single observation on the validation dataset when we have no ignorable column if len(ignorable_input_columns) == 0: @@ -55,7 +56,9 @@ def run(self): ignore_columns = [] ignore_columns.append(column_name) - ignore_col_predictions = self.transaction.model_backend.predict('validate', ignore_columns) + # Silence logging since otherwise lightwood and ludwig will complain too much about None values + with disable_console_output(): + ignore_col_predictions = self.transaction.model_backend.predict('validate', ignore_columns) # create a vector that has True for each feature that was passed to the model tester and False if it was blanked features_existence = [True if np_col not in ignore_columns else False for np_col in input_columns] diff --git a/mindsdb/libs/phases/model_interface/model_interface.py b/mindsdb/libs/phases/model_interface/model_interface.py --- a/mindsdb/libs/phases/model_interface/model_interface.py +++ b/mindsdb/libs/phases/model_interface/model_interface.py @@ -10,12 +10,14 @@ def run(self, mode='train'): try: from mindsdb.libs.backends.ludwig import LudwigBackend except ImportError as e: - self.transaction.log.warning(e) + # Ludwig is optional, so this is fine + pass try: from mindsdb.libs.backends.lightwood import LightwoodBackend except ImportError as e: self.transaction.log.warning(e) + if self.transaction.hmd['model_backend'] == 'ludwig': self.transaction.model_backend = LudwigBackend(self.transaction) elif self.transaction.hmd['model_backend'] == 'lightwood':
diff --git a/tests/accuracy_benchmarking/benchmark.py b/tests/accuracy_benchmarking/benchmark.py --- a/tests/accuracy_benchmarking/benchmark.py +++ b/tests/accuracy_benchmarking/benchmark.py @@ -45,8 +45,8 @@ def run_benchmarks(): except: pass - #TESTS = ['default_of_credit', 'cancer50', 'pulsar_stars', 'cifar_100', 'imdb_movie_review'] - TESTS = ['default_of_credit', 'cancer50', 'pulsar_stars'] + TESTS = ['default_of_credit', 'cancer50', 'pulsar_stars', 'cifar_100', 'imdb_movie_review'] + #TESTS = ['default_of_credit', 'cancer50', 'pulsar_stars'] test_data_arr = [] for test_name in TESTS: ''' diff --git a/tests/ci_tests/fast_test.py b/tests/ci_tests/fast_test.py --- a/tests/ci_tests/fast_test.py +++ b/tests/ci_tests/fast_test.py @@ -2,7 +2,6 @@ import torch # Test with a few basic options - if __name__ == "__main__": basic_test(backend='lightwood',use_gpu=torch.cuda.is_available(),ignore_columns=[], IS_CI_TEST=True) print('\n\n=============[Success]==============\n Finished running quick test !\n=============[Success]==============\n\n') diff --git a/tests/ci_tests/full_test.py b/tests/ci_tests/full_test.py --- a/tests/ci_tests/full_test.py +++ b/tests/ci_tests/full_test.py @@ -10,12 +10,17 @@ use_gpu_settings.append(False) + # Try ignoring some columns and running only the stats generator + run_extra = True + # Cycle through a few options: - for backend in ['lightwood','ludwig']: + for backend in ['lightwood']: #,'ludwig' for use_gpu in use_gpu_settings: print(f'use_gpu is set to {use_gpu}, backend is set to {backend}') - basic_test(backend=backend,use_gpu=use_gpu,ignore_columns=[], IS_CI_TEST=True) + if run_extra: + basic_test(backend=backend,use_gpu=use_gpu,ignore_columns=['days_on_market','number_of_bathrooms'], IS_CI_TEST=True, run_extra=True) + run_extra = False + else: + basic_test(backend=backend,use_gpu=use_gpu,ignore_columns=[], IS_CI_TEST=True) - # Try ignoring some columns and running only the stats generator - basic_test(backend='lightwood',use_gpu=use_gpu_settings[0],ignore_columns=['days_on_market','number_of_bathrooms'],run_extra=True, IS_CI_TEST=True) print('\n\n=============[Success]==============\n Finished running full test suite !\n=============[Success]==============\n\n') diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py --- a/tests/ci_tests/tests.py +++ b/tests/ci_tests/tests.py @@ -57,12 +57,18 @@ def basic_test(backend='lightwood',use_gpu=True,ignore_columns=[], run_extra=Fal mindsdb.CONFIG.IS_CI_TEST = IS_CI_TEST if run_extra: for py_file in [x for x in os.listdir('../functional_testing') if '.py' in x]: - os.system(f'python3 ../functional_testing/{py_file}') + # Skip data source tests since installing dependencies is annoying + # @TODO: Figure out a way to make travis install required dependencies on osx + if 'all_data_sources' in py_file: + continue + code = os.system(f'python3 ../functional_testing/{py_file}') + if code != 0: + raise Exception(f'Test failed with status code: {code} !') # Create & Learn to_predict = 'rental_price' mdb = mindsdb.Predictor(name='home_rentals_price') - mdb.learn(to_predict=to_predict,from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=10,use_gpu=use_gpu) + mdb.learn(to_predict=to_predict,from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=30,use_gpu=use_gpu) # Reload & Predict model_name = 'home_rentals_price' diff --git a/tests/functional_testing/custom_model.py 
b/tests/functional_testing/custom_model.py --- a/tests/functional_testing/custom_model.py +++ b/tests/functional_testing/custom_model.py @@ -22,7 +22,7 @@ def train(self): self.le_arr = {} for col in [*self.output_columns, *self.input_columns]: self.le_arr[col] = preprocessing.LabelEncoder() - self.le_arr[col].fit(self.transaction.input_data.data_frame[col]) + self.le_arr[col].fit(pd.concat([self.transaction.input_data.train_df,self.transaction.input_data.test_df,self.transaction.input_data.validation_df])[col]) X = [] for col in self.input_columns:
Group by integer column in time series model training is giving an error **Your Environment** * Mindsdb version you tried to install: 1.9.2 **Describe the bug** Grouping by an integer column in time series model training is giving an error. **To Reproduce** Steps to reproduce the behavior, for example: 1. Use the attached data sets and code 2. Run the code: `...etc` 3. You should see the error: `... blah` **Expected behavior** It should train the model. **Additional context** **AGREEMENTID** is an integer column; when I add "x" at the end of each agreement id (to make it a string) it works <img width="928" alt="groupby_timeseries_foreclosure_data" src="https://user-images.githubusercontent.com/27191684/71776112-d061ab00-2fb1-11ea-870d-5ffc54f01cde.PNG"> [timeseres_column_int.zip](https://github.com/mindsdb/mindsdb/files/4022937/timeseres_column_int.zip)
Hmh, weird, I think I experimented with timeseries before and grouping by numerical columns worked just fine. Might be a bug I introduced in the recent changes to the data splitting logic (which also handles grouping by and ordering). Please find the code for both the error and success cases. After converting the int values in the "AGREEMENTID" column to string values it worked. [Error.zip](https://github.com/mindsdb/mindsdb/files/4036102/Error.zip) [Success.zip](https://github.com/mindsdb/mindsdb/files/4036108/Success.zip)
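The failing line in `_get_group_by_key` concatenates the raw cell value into a string, which raises `TypeError` once the group-by column holds integers; the patched f-string stringifies any dtype. A minimal reproduction, with a plain dict standing in for the dataframe row:

```python
row = {'AGREEMENTID': 1202530}  # integer group-by value
column = 'AGREEMENTID'

# Old behaviour: str + int raises TypeError
try:
    key = column + '_' + row[column] + '!!@@!!'
except TypeError as e:
    print('old code fails:', e)

# Patched behaviour: the f-string formats the value whatever its type
key = f'{column}_{row[column]}_!!@@!!'
print('new key:', key)
```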
2020-01-14T22:09:37Z
[]
[]
mindsdb/mindsdb
393
mindsdb__mindsdb-393
[ "390" ]
2efe61c462473c680fec34a1fb6063dbf245f549
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py --- a/mindsdb/__about__.py +++ b/mindsdb/__about__.py @@ -1,6 +1,6 @@ __title__ = 'MindsDB' __package_name__ = 'mindsdb' -__version__ = '1.13.9' +__version__ = '1.13.10' __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects." __email__ = "[email protected]" __author__ = 'MindsDB Inc' diff --git a/mindsdb/libs/backends/lightwood.py b/mindsdb/libs/backends/lightwood.py --- a/mindsdb/libs/backends/lightwood.py +++ b/mindsdb/libs/backends/lightwood.py @@ -31,6 +31,7 @@ def _create_timeseries_df(self, original_df): for _, row in original_df.iterrows(): gb_lookup_key = self._get_group_by_key(group_by, row) + print(f'\n\n{gb_lookup_key}\n\n') if gb_lookup_key not in group_by_ts_map: group_by_ts_map[gb_lookup_key] = [] diff --git a/mindsdb/libs/backends/ludwig.py b/mindsdb/libs/backends/ludwig.py --- a/mindsdb/libs/backends/ludwig.py +++ b/mindsdb/libs/backends/ludwig.py @@ -14,7 +14,6 @@ from imageio import imread -# @TODO: Define generci interface, similar to 'base_module' in the phases class LudwigBackend(): def __init__(self, transaction): diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py --- a/mindsdb/libs/controllers/transaction.py +++ b/mindsdb/libs/controllers/transaction.py @@ -52,7 +52,7 @@ def __init__(self, session, light_transaction_metadata, heavy_transaction_metada self.run() - # @TODO Make it more generic, move to general helpers, use inside predictor instead of linline loading + def load_metadata(self): try: import resource @@ -76,7 +76,7 @@ def load_metadata(self): except: self.log.error(f'Could not load mindsdb heavy metadata in the file: {fn}') - # @TODO Make it more generic, move to general helpers + def save_metadata(self): fn = os.path.join(CONFIG.MINDSDB_STORAGE_PATH, self.lmd['name'] + '_light_model_metadata.pickle') self.lmd['updated_at'] = str(datetime.datetime.now()) @@ -128,8 +128,7 @@ def _call_phase_module(self, clean_exit, module_name, **kwargs): if clean_exit: sys.exit(1) else: - raise ValueError(error) - return None + raise Exception(error) finally: self.lmd['is_active'] = False @@ -156,7 +155,7 @@ def _execute_learn(self): self.lmd['current_phase'] = MODEL_STATUS_PREPARING self.save_metadata() - self._call_phase_module(clean_exit=True, module_name='DataExtractor') + self._call_phase_module(clean_exit=False, module_name='DataExtractor') self.save_metadata() self.lmd['current_phase'] = MODEL_STATUS_DATA_ANALYSIS @@ -164,19 +163,19 @@ def _execute_learn(self): self.load_metadata() else: self.save_metadata() - self._call_phase_module(clean_exit=True, module_name='StatsGenerator', input_data=self.input_data, modify_light_metadata=True, hmd=self.hmd) + self._call_phase_module(clean_exit=False, module_name='StatsGenerator', input_data=self.input_data, modify_light_metadata=True, hmd=self.hmd) self.save_metadata() - self._call_phase_module(clean_exit=True, module_name='DataSplitter') + self._call_phase_module(clean_exit=False, module_name='DataSplitter') - self._call_phase_module(clean_exit=True, module_name='DataTransformer', input_data=self.input_data) + self._call_phase_module(clean_exit=False, module_name='DataTransformer', input_data=self.input_data) self.lmd['current_phase'] = MODEL_STATUS_TRAINING self.save_metadata() - self._call_phase_module(clean_exit=True, module_name='ModelInterface', mode='train') + self._call_phase_module(clean_exit=False, module_name='ModelInterface', 
mode='train') self.lmd['current_phase'] = MODEL_STATUS_ANALYZING self.save_metadata() - self._call_phase_module(clean_exit=True, module_name='ModelAnalyzer') + self._call_phase_module(clean_exit=False, module_name='ModelAnalyzer') self.lmd['current_phase'] = MODEL_STATUS_TRAINED self.save_metadata() diff --git a/mindsdb/libs/helpers/general_helpers.py b/mindsdb/libs/helpers/general_helpers.py --- a/mindsdb/libs/helpers/general_helpers.py +++ b/mindsdb/libs/helpers/general_helpers.py @@ -210,7 +210,6 @@ def get_tensorflow_colname(col): return col @contextmanager -# @TODO: Make it work with mindsdb logger/log levels... maybe def disable_console_output(activate=True): try: try: diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py --- a/mindsdb/libs/phases/data_extractor/data_extractor.py +++ b/mindsdb/libs/phases/data_extractor/data_extractor.py @@ -78,6 +78,10 @@ def _get_prepared_input_df(self): df = self.transaction.hmd['when_data'] df = df.where((pd.notnull(df)), None) + for col in self.transaction.lmd['columns']: + if col not in df.columns: + df[col] = [None] * len(df) + elif self.transaction.hmd['model_when_conditions'] is not None: # if no data frame yet, make one diff --git a/mindsdb/libs/phases/data_transformer/data_transformer.py b/mindsdb/libs/phases/data_transformer/data_transformer.py --- a/mindsdb/libs/phases/data_transformer/data_transformer.py +++ b/mindsdb/libs/phases/data_transformer/data_transformer.py @@ -49,11 +49,7 @@ def _standardize_datetime(date_str): dt = datetime.datetime.utcfromtimestamp(date_str) except: return None - # Uncomment if we want to work internally date type - #return dt - # Uncomment if we want to work internally with string type - # @TODO Decide if we ever need/want the milliseconds return dt.strftime('%Y-%m-%d %H:%M:%S') @staticmethod @@ -62,7 +58,6 @@ def _lightwood_datetime_processing(dt): try: return dt.timestamp() except: - # @TODO Return `None` after appropriate changes in lightwood return None @staticmethod diff --git a/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py b/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py --- a/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py +++ b/mindsdb/libs/phases/model_analyzer/helpers/column_evaluator.py @@ -29,7 +29,6 @@ def get_column_importance(self, model, output_columns, input_columns, full_datas # Histogram for when all columns are present, in order to plot the force vectors for output_column in output_columns: - # @TODO: Running stats generator just to get the histogram is very inefficient, change this validation_set_output_column_histogram, _ = StatsGenerator.get_histogram(normal_predictions[output_column], data_type=stats[output_column]['data_type'],data_subtype=stats[output_column]['data_subtype']) if validation_set_output_column_histogram is not None: diff --git a/mindsdb/libs/phases/stats_generator/scores.py b/mindsdb/libs/phases/stats_generator/scores.py --- a/mindsdb/libs/phases/stats_generator/scores.py +++ b/mindsdb/libs/phases/stats_generator/scores.py @@ -199,11 +199,7 @@ def compute_similariy_score(stats, columns, col_name): if other_col_name == col_name: continue else: - # @TODO Figure out why computing matthews_corrcoef is so slow, possibly find a better implementation and replace it with that - #try: - # similarity = matthews_corrcoef(list(map(str,col_data)), list(map(str,columns[other_col_name]))) - # similarities.append((other_col_name,similarity)) - #except: + # @TODO Figure 
out why computing matthews_corrcoef is so slow, possibly find a better implementation and replace it with that. Matthews corrcoef code was: similarity = matthews_corrcoef(list(map(str,col_data)), list(map(str,columns[other_col_name]))) similarity = 0 X1 = list(map(str,col_data)) X2 = list(map(str,columns[other_col_name])) diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py --- a/mindsdb/libs/phases/stats_generator/stats_generator.py +++ b/mindsdb/libs/phases/stats_generator/stats_generator.py @@ -142,7 +142,7 @@ def _get_column_data_type(self, data, data_frame, col_name): for element in data: # Maybe use list of functions in the future - element = element + element = str(element) current_subtype_guess = 'Unknown' current_type_guess = 'Unknown' @@ -185,8 +185,6 @@ def _get_column_data_type(self, data, data_frame, col_name): current_type_guess = DATA_TYPES.FILE_PATH current_subtype_guess = subtype - # If nothing works, assume it's categorical or sequential and determine type later (based on all the data in the column) - if current_type_guess not in type_dist: type_dist[current_type_guess] = 1 else: @@ -205,7 +203,7 @@ def _get_column_data_type(self, data, data_frame, col_name): # assume that the type is the one with the most prevalent type_dist for data_type in type_dist: # If any of the members are Unknown, use that data type (later to be turned into CATEGORICAL or SEQUENTIAL), since otherwise the model will crash when casting - # @TODO consider removing rows where data type is unknown in the future, might just be corrupt data... a bit hard to imply currently + # @TODO consider removing or flagging rows where data type is unknown in the future, might just be corrupt data... a bit hard to imply currently if data_type == 'Unknown': curr_data_type = 'Unknown' break @@ -227,8 +225,6 @@ def _get_column_data_type(self, data, data_frame, col_name): type_dist[curr_data_type] = type_dist.pop('Unknown') subtype_dist[curr_data_subtype] = subtype_dist.pop('Unknown') - - # @TODO: Extremely slow for large datasets, make it faster if curr_data_type != DATA_TYPES.CATEGORICAL and curr_data_subtype != DATA_SUBTYPES.DATE: all_values = data_frame[col_name] all_distinct_vals = set(all_values)
diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py --- a/tests/ci_tests/tests.py +++ b/tests/ci_tests/tests.py @@ -35,7 +35,6 @@ def test_force_vectors(amd, to_predict): def test_adapted_model_data(amd, to_predict): amd = amd - # @TODO: Sometimes are None, not sure why: [, validation_set_accuracy, accuracy] for k in ['status', 'name', 'version', 'data_source', 'current_phase', 'updated_at', 'created_at', 'train_end_at']: assert (type(amd[k]) == str) diff --git a/tests/integration_tests/data_generators.py b/tests/integration_tests/data_generators.py --- a/tests/integration_tests/data_generators.py +++ b/tests/integration_tests/data_generators.py @@ -22,7 +22,6 @@ def generate_timeseries(length, bounds=(0,1852255420), _type='timestamp',period= def rand_str(length=random.randrange(4,120)): # Create a list of unicode characters within the range 0000-D7FF - # @TODO Copy pasted the 0xD7FF value, not 100% sure it returns all uncideo chars, maybe check that random_unicodes = [chr(random.randrange(0xD7FF)) for _ in range(0, length)] return u"".join(random_unicodes) @@ -79,7 +78,7 @@ def generate_value_cols(types, length, separator=',', ts_period=48*3600): for n in range(length): val = gen_fun() - # @TODO Maybe escpae the separator rather than replace + # @TODO: Maybe escpae the separator rather than replace them if type(val) == str: val = val.replace(separator,'_').replace('\n','_').replace('\r','_') columns[-1].append(val)
Status stays as "training" when learn fails There are some scenarios in which calling `learn` fails by throwing an error but the model status remains `training` (based on Max's observation, though I assume it should be easy enough to spot even without replicating).
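The patch replaces `sys.exit(1)` inside `_call_phase_module` with a raised `Exception`, so the failure can propagate to the caller instead of killing the process while the persisted status still reads "training". A stripped-down sketch of the resulting control flow; the 'error' status value is an assumption (the constants shown elsewhere only include statuses like `MODEL_STATUS_TRAINED`):

```python
class Transaction:
    def __init__(self):
        self.status = 'training'

    def _call_phase_module(self, phase):
        try:
            phase()
        except Exception:
            # Raising (rather than calling sys.exit) gives the caller a chance to react...
            raise

    def _execute_learn(self, phases):
        try:
            for phase in phases:
                self._call_phase_module(phase)
            self.status = 'trained'
        except Exception:
            # ...so the stored status never stays stuck at 'training' after a failure.
            self.status = 'error'  # assumed status value
            raise
```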
2020-02-21T07:33:04Z
[]
[]
mindsdb/mindsdb
438
mindsdb__mindsdb-438
[ "433" ]
5f0f8e8546274d6330ef9a7e89f18ed8a40c2292
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
 __title__ = 'MindsDB'
 __package_name__ = 'mindsdb'
-__version__ = '1.17.1'
+__version__ = '1.17.2'
 __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
 __email__ = "[email protected]"
 __author__ = 'MindsDB Inc'
diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py
--- a/mindsdb/libs/controllers/predictor.py
+++ b/mindsdb/libs/controllers/predictor.py
@@ -728,13 +728,41 @@ def learn(self, to_predict, from_data, test_from_data=None, group_by = None, win
             if old_hmd[k] is not None:
                 heavy_transaction_metadata[k] = old_hmd[k]

         Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log)

+    def test(self, when_data, accuracy_score_functions, score_using='predicted_value', predict_args=None):
+        """
+        :param when_data: use this when you have data in either a file, a pandas data frame, or url to a file that you want to predict from
+        :param accuracy_score_functions: a single function or a dictionary for the form `{f'{target_name}': acc_func}` for when we have multiple targets
+        :param score_using: what values from the `explanation` of the target to use in the score function, defaults to the
+        :param predict_args: dictionary of arguments to be passed to `predict`, e.g: `predict_args={'use_gpu': True}`
+
+        :return: a dictionary for the form `{f'{target_name}_accuracy': accuracy_func_return}`, e.g. {'rental_price_accuracy':0.99}
+        """
+        if predict_args is None:
+            predict_args = {}
+
+        predictions = self.predict(when_data=when_data, **predict_args)
+
+        with open(os.path.join(CONFIG.MINDSDB_STORAGE_PATH, f'{self.name}_light_model_metadata.pickle'), 'rb') as fp:
+            lmd = pickle.load(fp)
+
+        accuracy_dict = {}
+        for col in lmd['predict_columns']:
+            if type(accuracy_score_functions) == type({}):
+                acc_f = accuracy_score_functions[col]
+            else:
+                acc_f = accuracy_score_functions
+
+            accuracy_dict[f'{col}_accuracy'] = acc_f([x[f'__observed_{col}'] for x in predictions], [x.explanation[col][score_using] for x in predictions])
+
+        return accuracy_dict
+
     def predict(self, when=None, when_data=None, update_cached_model = False, use_gpu=None, unstable_parameters_dict=None, backend=None, run_confidence_variation_analysis=False):
         """
         You have a mind trained already and you want to make a prediction

         :param when: use this if you have certain conditions for a single prediction
-        :param when_data: (optional) use this when you have data in either a file, a pandas data frame, or url to a file that you want to predict from
+        :param when_data: use this when you have data in either a file, a pandas data frame, or url to a file that you want to predict from
         :param update_cached_model: (optional, default:False) when you run predict for the first time, it loads the latest model in memory, you can force it to do this on this run by flipping it to True
         :param run_confidence_variation_analysis: Run a confidence variation analysis on each of the given input column, currently only works when making single predictions via `when`
diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py
--- a/mindsdb/libs/controllers/transaction.py
+++ b/mindsdb/libs/controllers/transaction.py
@@ -254,7 +254,10 @@ def _execute_predict(self):
         output_data = {col: [] for col in self.lmd['columns']}

         for column in self.input_data.columns:
-            output_data[column] = list(self.input_data.data_frame[column])
+            if column in self.lmd['predict_columns']:
+                output_data[f'__observed_{column}'] = list(self.input_data.data_frame[column])
+            else:
+                output_data[column] = list(self.input_data.data_frame[column])

         for predicted_col in self.lmd['predict_columns']:
             output_data[predicted_col] = list(self.hmd['predictions'][predicted_col])
diff --git a/mindsdb/libs/helpers/probabilistic_validator.py b/mindsdb/libs/helpers/probabilistic_validator.py
--- a/mindsdb/libs/helpers/probabilistic_validator.py
+++ b/mindsdb/libs/helpers/probabilistic_validator.py
@@ -20,7 +20,7 @@ def __init__(self, col_stats, col_name, input_columns):
         """
         Chose the algorithm to use for the rest of the model
         As of right now we go with BernoulliNB¶
-        """
+        """
         self.col_stats = col_stats
         self.col_name = col_name
         self.input_columns = input_columns
@@ -72,20 +72,20 @@ def fit(self, real_df, predictions_arr, missing_col_arr, hmd=None):
             except:
                 real_value = None

-            if self.buckets is not None:
+            if self.buckets is not None:
                 predicted_value_b = get_value_bucket(predicted_value, self.buckets, self.col_stats, hmd)
                 real_value_b = get_value_bucket(real_value, self.buckets, self.col_stats, hmd)

                 X.append([0] * (len(self.buckets) + 1))
                 X[-1][predicted_value_b] = 1
-
+
             else:
                 predicted_value_b = predicted_value
                 real_value_b = real_value_b
-
+
                 X.append([])
-
+
             Y.append(real_value_b == predicted_value_b)

             if n == 0:
@@ -98,7 +98,7 @@ def fit(self, real_df, predictions_arr, missing_col_arr, hmd=None):
                 feature_existance[self.input_columns.index(missing_col)] = 0

             X[-1] += feature_existance
-
+
         log_types = np.seterr()
         np.seterr(divide='ignore')
@@ -130,7 +130,7 @@ def evaluate_prediction_accuracy(self, features_existence, predicted_value):
             probability_true_prediction = 0
         else:
             probability_true_prediction = self._probabilistic_model.predict_proba(np.array(X))[0][true_index]
-
+
         return probability_true_prediction
@@ -143,7 +143,7 @@ def get_accuracy_stats(self):
                 bucket_acc_counts[bucket] = []

             bucket_acc_counts[bucket].append(1 if bucket == self.real_values_bucketized[i] else 0)
-
+
         for bucket in bucket_accuracy:
             bucket_accuracy[bucket] = sum(bucket_acc_counts[bucket])/len(bucket_acc_counts[bucket])
@@ -180,9 +180,9 @@ def get_accuracy_stats(self):
             'predicted': bucket_values,
             'real': bucket_values
         }
-
-        return overall_accuracy, accuracy_histogram, cm
+
+        return overall_accuracy, accuracy_histogram, cm

 if __name__ == "__main__":
     pass
-    # Removing test for now, as tets for the new one stand-alone would require the creation of a bunch of dataframes mimicking those inputed into mindsdb and those predicted by lightwood.
\ No newline at end of file
+    # Removing test for now, as tets for the new one stand-alone would require the creation of a bunch of dataframes mimicking those inputed into mindsdb and those predicted by lightwood.
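Note how the transaction.py hunk above changes the shape of prediction output: observed target values move to `__observed_<column>` keys so that ground truth and predicted values can coexist in one row, which is exactly what the new `test` method consumes. A minimal sketch of that data flow follows; `PredictionRow` is a hypothetical stand-in (real mindsdb rows expose the same dict keys plus an `explanation` attribute, but are a different class):

from sklearn.metrics import r2_score

# Hypothetical stand-in for a mindsdb prediction row: dict access for the
# observed value, attribute access for the per-target explanation.
class PredictionRow(dict):
    def __init__(self, observed, predicted):
        super().__init__({'__observed_rental_price': observed})
        self.explanation = {'rental_price': {'predicted_value': predicted}}

predictions = [PredictionRow(1000, 990), PredictionRow(2500, 2600), PredictionRow(1800, 1750)]

# Mirrors the two list comprehensions inside Predictor.test().
real = [x['__observed_rental_price'] for x in predictions]
scored = [x.explanation['rental_price']['predicted_value'] for x in predictions]
print(r2_score(real, scored))  # what lands in accuracy_dict['rental_price_accuracy']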
diff --git a/tests/ci_tests/fast_test.py b/tests/ci_tests/fast_test.py
--- a/tests/ci_tests/fast_test.py
+++ b/tests/ci_tests/fast_test.py
@@ -3,5 +3,5 @@

 # Test with a few basic options
 if __name__ == "__main__":
-    basic_test(backend='lightwood',use_gpu=torch.cuda.is_available(),ignore_columns=[], IS_CI_TEST=True)
+    basic_test(backend='lightwood',use_gpu=torch.cuda.is_available(), IS_CI_TEST=True)
     print('\n\n=============[Success]==============\n Finished running quick test !\n=============[Success]==============\n\n')
diff --git a/tests/ci_tests/full_test.py b/tests/ci_tests/full_test.py
--- a/tests/ci_tests/full_test.py
+++ b/tests/ci_tests/full_test.py
@@ -4,23 +4,14 @@

 if __name__ == "__main__":
-    use_gpu_settings = []
+    use_gpu_settings = [False]
     if torch.cuda.is_available():
         use_gpu_settings.append(True)
-    use_gpu_settings.append(False)
-
-    # Try ignoring some columns and running only the stats generator
-    run_extra = True
-
     # Cycle through a few options:
-    for backend in ['lightwood']: #,'ludwig'
+    for backend in ['lightwood']:
         for use_gpu in use_gpu_settings:
             print(f'use_gpu is set to {use_gpu}, backend is set to {backend}')
-            if run_extra:
-                basic_test(backend=backend,use_gpu=use_gpu,ignore_columns=['days_on_market','number_of_bathrooms'], IS_CI_TEST=True, run_extra=True)
-                run_extra = False
-            else:
-                basic_test(backend=backend,use_gpu=use_gpu,ignore_columns=[], IS_CI_TEST=True)
+            basic_test(backend=backend,use_gpu=use_gpu, IS_CI_TEST=True, run_extra=True)

     print('\n\n=============[Success]==============\n Finished running full test suite !\n=============[Success]==============\n\n')
diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py
--- a/tests/ci_tests/tests.py
+++ b/tests/ci_tests/tests.py
@@ -1,7 +1,7 @@
 import mindsdb
 import sys
 import os
-
+from sklearn.metrics import r2_score

 def test_data_analysis(amd, to_predict):
     data_analysis = amd['data_analysis']
@@ -52,14 +52,20 @@ def test_adapted_model_data(amd, to_predict):
     #test_force_vectors(amd, to_predict)

-def basic_test(backend='lightwood',use_gpu=True,ignore_columns=[], run_extra=False, IS_CI_TEST=False):
+def basic_test(backend='lightwood',use_gpu=True, run_extra=False, IS_CI_TEST=False):
     mindsdb.CONFIG.IS_CI_TEST = IS_CI_TEST
     if run_extra:
         for py_file in [x for x in os.listdir('../functional_testing') if '.py' in x]:
             # Skip data source tests since installing dependencies is annoying
             # @TODO: Figure out a way to make travis install required dependencies on osx
-            if 'all_data_sources' in py_file:
+
+            ctn = False
+            for name in ['all_data_sources', 'custom_model']:
+                if name in py_file:
+                    ctn = True
+            if ctn:
                 continue
+
             code = os.system(f'python3 ../functional_testing/{py_file}')
             if code != 0:
                 raise Exception(f'Test failed with status code: {code} !')
@@ -67,7 +73,7 @@ def basic_test(backend='lightwood',use_gpu=True,ignore_columns=[], run_extra=Fal
     # Create & Learn
     to_predict = 'rental_price'
     mdb = mindsdb.Predictor(name='home_rentals_price')
-    mdb.learn(to_predict=to_predict,from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=30,use_gpu=use_gpu)
+    mdb.learn(to_predict=to_predict,from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",backend=backend, stop_training_in_x_seconds=120,use_gpu=use_gpu)

     # Reload & Predict
     model_name = 'home_rentals_price'
@@ -78,6 +84,9 @@ def basic_test(backend='lightwood',use_gpu=True,ignore_columns=[], run_extra=Fal
     mdb = mindsdb.Predictor(name=model_name)

     # Try predicting from a file and from a dictionary
     prediction = mdb.predict(when_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", use_gpu=use_gpu)
+
+    mdb.test(when_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv",accuracy_score_functions=r2_score,predict_args={'use_gpu': use_gpu})
+
     prediction = mdb.predict(when={'sqft':300}, use_gpu=use_gpu)

     # Test all different forms of output
diff --git a/tests/functional_testing/custom_model.py b/tests/functional_testing/custom_model.py
--- a/tests/functional_testing/custom_model.py
+++ b/tests/functional_testing/custom_model.py
@@ -3,11 +3,11 @@
 import numpy as np
 from sklearn import tree
 from sklearn import preprocessing
-
+from sklearn.linear_model import LinearRegression

 class CustomDTModel():
     def __init__(self):
-        self.clf = tree.DecisionTreeClassifier()
+        self.clf = LinearRegression()
         le = preprocessing.LabelEncoder()

     def set_transaction(self, transaction):
@@ -62,4 +62,3 @@ def predict(self, mode='predict', ignore_columns=[]):

 predictor.learn(to_predict='rental_price',from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", backend=dt_model)
 predictions = predictor.predict(when_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv", backend=dt_model)
-print(predictions[25])
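As an aside on the new skip logic in tests.py: the `ctn` flag loop works, but the same check reads more directly with `any()`. A sketch of that alternative, not part of the patch (the relative path and skip names are the ones the CI script itself uses):

import os

# Suites skipped because installing their dependencies in CI is annoying;
# the two names are the ones the patch hard-codes.
SKIPPED = ('all_data_sources', 'custom_model')

for py_file in os.listdir('../functional_testing'):
    if not py_file.endswith('.py'):
        continue
    if any(name in py_file for name in SKIPPED):  # replaces the ctn flag
        continue
    code = os.system(f'python3 ../functional_testing/{py_file}')
    if code != 0:
        raise Exception(f'Test failed with status code: {code} !')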
predictor.test method

Implement a `predictor.test` method as such: `predictor.test(using_data=<file or data_frame>, accuracy_score_function=<pointer to function>)`.
Few things:

* If `accuracy_score_function` is not specified it should default to whatever mindsdb would use as the default.
* An additional argument, `score_using`, should specify what value is passed to the accuracy function, e.g. for numbers it could be `confidence_range` or `predicted_value`. This argument should default to `predicted_value`.
* Also, I feel like this requires all the options that can be passed to the `.predict` method, i.e. `use_gpu`, `unstable_parameters_dict`, `backend`, `run_confidence_variation_analysis`. Though maybe one way to keep the interface simpler is to just pass these parameters into a `predict_args` argument (type dictionary), since in most cases they won't be needed.
* Finally, `using_data` should be `when_data` in order to be consistent with the `.predict` method. Granted, I think `using_data` or `data` is a better name and I'd be tempted to have that as the data argument for `predict`, `learn`, `analyze_data` and `test`. But that would be a pretty major interface change and something to be done only when we release version `2.0.0`.
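Putting these hints together with the patch as merged, a hedged usage sketch of the resulting API (argument names per the merged code: `when_data`, `accuracy_score_functions`, `score_using`, `predict_args`; the data URL is the one used in the repository's CI tests):

import mindsdb
from sklearn.metrics import r2_score

DATA = "https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv"

# Train a predictor first; model name mirrors the CI test.
mdb = mindsdb.Predictor(name='home_rentals_price')
mdb.learn(to_predict='rental_price', from_data=DATA)

# One scoring function for a single target; pass a dict of the form
# {target_name: func} when predicting multiple targets. Predict-time
# options are funneled through predict_args rather than duplicated here.
scores = mdb.test(
    when_data=DATA,
    accuracy_score_functions=r2_score,
    score_using='predicted_value',
    predict_args={'use_gpu': False},
)
print(scores)  # e.g. {'rental_price_accuracy': 0.99}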
2020-05-07T21:18:07Z
[]
[]