Dataset schema (one row per column: name, dtype, observed range or number of distinct values):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k–681M (nullable) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3–10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |

The records below are rendered one at a time; within each record the fields appear in the column order above, separated by `|`, with the `content` field expanded across multiple lines.
c3e5ac9657c6a68133633a1e6cf922e32dec52c2 | 806709d56129cf9c5422b5e58be7072d21887b7d | /HW_clock/clockapp/apps.py | 688d37335d37894cee69aa8591f52c6f0c302190 | [] | no_license | merry-hyelyn/LikeLion | 6334f5cc909a090a5e264aedcd9d790436ce59cf | cbd1654abd0bfbf872c2c670c604679467f5ef8e | refs/heads/master | 2020-07-11T07:14:31.308439 | 2019-09-13T14:26:47 | 2019-09-13T14:26:47 | 204,473,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | from django.apps import AppConfig
class ClockappConfig(AppConfig):
name = 'clockapp'
| [
"[email protected]"
] | |
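For context, a Django `AppConfig` like `ClockappConfig` above only takes effect once the app is registered in the project settings. A minimal sketch, assuming a standard Django project layout (the surrounding settings entries are the usual defaults, not from this record):

```python
# settings.py (sketch; the project layout is an assumption)
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # either the bare app label "clockapp" or the explicit AppConfig path works:
    "clockapp.apps.ClockappConfig",
]
```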
ed9a9a16fa606dc3a0b92e7d93dbca7f3237abe1 | 7101871e7a82d202483ada3053fec155ce7824a6 | /test/functional/sapling_wallet_send.py | 39c6d0a107af083c9d58aa278ac3ef4d18f9ad78 | [
"MIT"
] | permissive | trumpcoinsupport/TrumpCoin | 633a9992e46cab00774d01e569f4611b7f6b4b54 | 098c62ea249a63ca1cc31d5f37c6209ccdf50e2a | refs/heads/master | 2023-01-11T20:22:03.469608 | 2021-12-31T10:04:39 | 2021-12-31T10:04:39 | 194,952,065 | 15 | 14 | MIT | 2023-01-08T02:49:09 | 2019-07-03T00:24:45 | C++ | UTF-8 | Python | false | false | 3,831 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Zcash developers
# Copyright (c) 2020 The TrumpCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from decimal import Decimal
from test_framework.test_framework import TrumpCoinTestFramework
from test_framework.util import (
assert_equal,
)
class SaplingWalletSend(TrumpCoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
saplingUpgrade = ['-nuparams=v5_shield:201']
self.extra_args = [saplingUpgrade, saplingUpgrade, saplingUpgrade]
def run_test(self):
self.log.info("Mining...")
self.nodes[0].generate(2)
self.sync_all()
self.nodes[2].generate(200)
self.sync_all()
assert_equal(self.nodes[1].getblockcount(), 202)
taddr1 = self.nodes[1].getnewaddress()
saplingAddr1 = self.nodes[1].getnewshieldaddress()
# Verify addresses
assert(saplingAddr1 in self.nodes[1].listshieldaddresses())
assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), Decimal('0'))
assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))
# Test subtract fee from recipient
self.log.info("Checking sendto[shield]address with subtract-fee-from-amt")
node_0_bal = self.nodes[0].getbalance()
node_1_bal = self.nodes[1].getbalance()
txid = self.nodes[0].sendtoaddress(saplingAddr1, 10, "", "", True)
node_0_bal -= Decimal('10')
assert_equal(self.nodes[0].getbalance(), node_0_bal)
self.sync_mempools()
self.nodes[2].generate(1)
self.sync_all()
feeTx = self.nodes[0].gettransaction(txid)["fee"] # fee < 0
saplingAddr1_bal = (Decimal('10') + feeTx)
node_1_bal += saplingAddr1_bal
assert_equal(self.nodes[1].getbalance(), node_1_bal)
self.log.info("Checking shieldsendmany with subtract-fee-from-amt")
node_2_bal = self.nodes[2].getbalance()
recipients1 = [{"address": saplingAddr1, "amount": Decimal('10')},
{"address": self.nodes[0].getnewshieldaddress(), "amount": Decimal('5')}]
subtractfeefrom = [saplingAddr1]
txid = self.nodes[2].shieldsendmany("from_transparent", recipients1, 1, 0, subtractfeefrom)
node_2_bal -= Decimal('15')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
self.nodes[2].generate(1)
self.sync_all()
feeTx = self.nodes[2].gettransaction(txid)["fee"] # fee < 0
node_1_bal += (Decimal('10') + feeTx)
saplingAddr1_bal += (Decimal('10') + feeTx)
assert_equal(self.nodes[1].getbalance(), node_1_bal)
node_0_bal += Decimal('5')
assert_equal(self.nodes[0].getbalance(), node_0_bal)
self.log.info("Checking sendmany to shield with subtract-fee-from-amt")
node_2_bal = self.nodes[2].getbalance()
txid = self.nodes[2].sendmany('', {saplingAddr1: 10, taddr1: 10},
1, "", False, [saplingAddr1, taddr1])
node_2_bal -= Decimal('20')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
self.nodes[2].generate(1)
self.sync_all()
feeTx = self.nodes[2].gettransaction(txid)["fee"] # fee < 0
node_1_bal += (Decimal('20') + feeTx)
assert_equal(self.nodes[1].getbalance(), node_1_bal)
taddr1_bal = Decimal('10') + feeTx/2
saplingAddr1_bal += Decimal('10') + feeTx / 2
assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), taddr1_bal)
assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), saplingAddr1_bal)
if __name__ == '__main__':
SaplingWalletSend().main()
| [
"[email protected]"
] | |
d96f1e56f4b8aa0ac94be4330d3f7524cc14c3a7 | ffd19240effa4f50b8469432d6ad2078e6b0db7d | /app/models.py | 2e9fa0ba6b2c20f730a447db3c7f950342cbe5c6 | [] | no_license | Jackson-coder-arch/Studio-session-booker | 98e26ca1ef7953b81562884b4306becde097a47c | de20432fa3bb0660c7499efd5dd0917f0218670b | refs/heads/features | 2023-03-21T11:42:48.043138 | 2021-03-12T08:49:34 | 2021-03-12T08:49:34 | 345,683,743 | 0 | 0 | null | 2021-03-12T08:49:35 | 2021-03-08T14:27:54 | Python | UTF-8 | Python | false | false | 1,749 | py | from app import db
from flask_login import UserMixin
from datetime import datetime
from werkzeug.security import generate_password_hash,check_password_hash
from . import login_manager
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index =True)
email = db.Column(db.String(255),unique = True,index =True)
pass_secure = db.Column(db.String(255))
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
bookings = db.relationship('Booking',backref = 'user', lazy = 'dynamic')
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
def verify_password(self, password):
return check_password_hash(self.pass_secure, password)
def save_user(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
return f'User {self.username} '
class Booking(db.Model):
__tablename__ = 'bookings'
id = db.Column(db.Integer,primary_key = True)
# email = db.Column(db.String(255),unique = True,index =True)
title = db.Column(db.String(255))
day = db.Column(db.String(255))
session = db.Column(db.String(255))
category = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
def save_booking(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
return f'Booking {self.day} '
| [
"[email protected]"
] | |
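The `User` model above stores only a werkzeug hash in `pass_secure`; the write-only `password` property enforces that invariant. A minimal usage sketch, assuming an application context with the shared `db` session configured (everything outside the model itself is illustrative):

```python
# sketch: exercising the write-only password property
user = User(username="alice", email="alice@example.com")
user.password = "s3cret"            # hashed via generate_password_hash in the setter
assert user.verify_password("s3cret") is True
assert user.verify_password("wrong") is False

try:
    _ = user.password               # reading raises AttributeError by design
except AttributeError as e:
    print(e)                        # "You cannot read the password attribute"

user.save_user()                    # commits through the shared db session
```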
6ca7eee9d0ea8b564e470641f42e79dd6d3c8de4 | 35d3bd909cc232b51496b8b07971386305bbc769 | /sitemessage/settings.py | 649d4678202125f4fcafd413a1db551e4a45344d | [] | no_license | shtalinberg/django-sitemessage | 772810beae344529867df2b58e873a04dc6b5c93 | dc1a5312316c5d0269380c1f80752437c7a1d6eb | refs/heads/master | 2021-01-22T05:57:43.991672 | 2017-04-15T04:33:30 | 2017-04-15T04:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from django.conf import settings
# Module name to search sitemessage preferences in.
APP_MODULE_NAME = getattr(settings, 'SITEMESSAGE_APP_MODULE_NAME', 'sitemessages')
# Whether to register builtin message types.
INIT_BUILTIN_MESSAGE_TYPES = getattr(settings, 'SITEMESSAGE_INIT_BUILTIN_MESSAGE_TYPES', True)
# Priority for messages sent by Django Email backend (sitemessage.backends.EmailBackend).
EMAIL_BACKEND_MESSAGES_PRIORITY = getattr(settings, 'SITEMESSAGE_EMAIL_BACKEND_MESSAGES_PRIORITY', None)
# Message type alias for messages sent `schedule_email` shortcut.
DEFAULT_SHORTCUT_EMAIL_MESSAGES_TYPE = getattr(settings, 'SITEMESSAGE_DEFAULT_SHORTCUT_EMAIL_MESSAGES_TYPE', 'smtp')
# Site URL to use in messages.
SITE_URL = getattr(settings, 'SITEMESSAGE_SITE_URL', None)
| [
"[email protected]"
] | |
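Each of these module-level constants is read once from Django settings with a fallback default, so a project overrides them in its own settings module. A short sketch (the values are examples, not the library defaults):

```python
# project settings.py (illustrative values)
SITEMESSAGE_INIT_BUILTIN_MESSAGE_TYPES = False    # skip builtin message type registration
SITEMESSAGE_DEFAULT_SHORTCUT_EMAIL_MESSAGES_TYPE = "smtp"
SITEMESSAGE_SITE_URL = "https://example.com"      # used when composing message URLs
```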
6a9727a84a58a3c17a257bafd64c3423e263ac0a | 9a93a4d9e8d7424ccc3947ed8486083b815c5276 | /websockets/exceptions.py | 1b758c648ad74be77879f58c0bf6c315f1664f94 | [
"BSD-3-Clause"
] | permissive | MariaElysse/websockets | de40f7dea8fa26c5f29a0cc2bf41d78c1acd2ac8 | 4216b35384c177981c4d18d763248c712b8e21d4 | refs/heads/master | 2020-03-26T19:26:29.171235 | 2018-08-11T10:16:12 | 2018-08-11T10:16:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,462 | py | __all__ = [
'AbortHandshake', 'ConnectionClosed', 'DuplicateParameter',
'InvalidHandshake', 'InvalidHeader', 'InvalidHeaderFormat',
'InvalidHeaderValue', 'InvalidMessage', 'InvalidOrigin',
'InvalidParameterName', 'InvalidParameterValue', 'InvalidState',
'InvalidStatusCode', 'InvalidUpgrade', 'InvalidURI', 'NegotiationError',
'PayloadTooBig', 'WebSocketProtocolError',
]
class InvalidHandshake(Exception):
"""
Exception raised when a handshake request or response is invalid.
"""
class AbortHandshake(InvalidHandshake):
"""
Exception raised to abort a handshake and return a HTTP response.
"""
def __init__(self, status, headers, body=b''):
self.status = status
self.headers = headers
self.body = body
message = "HTTP {}, {} headers, {} bytes".format(
status, len(headers), len(body))
super().__init__(message)
class InvalidMessage(InvalidHandshake):
"""
Exception raised when the HTTP message in a handshake request is malformed.
"""
class InvalidHeader(InvalidHandshake):
"""
Exception raised when a HTTP header doesn't have a valid format or value.
"""
def __init__(self, name, value):
if value:
message = "Invalid {} header: {}".format(name, value)
else:
message = "Missing or empty {} header".format(name)
super().__init__(message)
class InvalidHeaderFormat(InvalidHeader):
"""
Exception raised when a Sec-WebSocket-* HTTP header cannot be parsed.
"""
def __init__(self, name, error, string, pos):
error = "{} at {} in {}".format(error, pos, string)
super().__init__(name, error)
class InvalidHeaderValue(InvalidHeader):
"""
Exception raised when a Sec-WebSocket-* HTTP header has a wrong value.
"""
class InvalidUpgrade(InvalidHeader):
"""
Exception raised when a Upgrade or Connection header isn't correct.
"""
class InvalidOrigin(InvalidHeader):
"""
Exception raised when the Origin header in a request isn't allowed.
"""
def __init__(self, origin):
super().__init__('Origin', origin)
class InvalidStatusCode(InvalidHandshake):
"""
Exception raised when a handshake response status code is invalid.
Provides the integer status code in its ``status_code`` attribute.
"""
def __init__(self, status_code):
self.status_code = status_code
message = "Status code not 101: {}".format(status_code)
super().__init__(message)
class NegotiationError(InvalidHandshake):
"""
Exception raised when negotiating an extension fails.
"""
class InvalidParameterName(NegotiationError):
"""
Exception raised when a parameter name in an extension header is invalid.
"""
def __init__(self, name):
self.name = name
message = "Invalid parameter name: {}".format(name)
super().__init__(message)
class InvalidParameterValue(NegotiationError):
"""
Exception raised when a parameter value in an extension header is invalid.
"""
def __init__(self, name, value):
self.name = name
self.value = value
message = "Invalid value for parameter {}: {}".format(name, value)
super().__init__(message)
class DuplicateParameter(NegotiationError):
"""
Exception raised when a parameter name is repeated in an extension header.
"""
def __init__(self, name):
self.name = name
message = "Duplicate parameter: {}".format(name)
super().__init__(message)
class InvalidState(Exception):
"""
Exception raised when an operation is forbidden in the current state.
"""
CLOSE_CODES = {
1000: "OK",
1001: "going away",
1002: "protocol error",
1003: "unsupported type",
# 1004 is reserved
1005: "no status code [internal]",
1006: "connection closed abnormally [internal]",
1007: "invalid data",
1008: "policy violation",
1009: "message too big",
1010: "extension required",
1011: "unexpected error",
1015: "TLS failure [internal]",
}
def format_close(code, reason):
"""
Display a human-readable version of the close code and reason.
"""
if 3000 <= code < 4000:
explanation = "registered"
elif 4000 <= code < 5000:
explanation = "private use"
else:
explanation = CLOSE_CODES.get(code, "unknown")
result = "code = {} ({}), ".format(code, explanation)
if reason:
result += "reason = {}".format(reason)
else:
result += "no reason"
return result
class ConnectionClosed(InvalidState):
"""
Exception raised when trying to read or write on a closed connection.
Provides the connection close code and reason in its ``code`` and
``reason`` attributes respectively.
"""
def __init__(self, code, reason):
self.code = code
self.reason = reason
message = "WebSocket connection is closed: "
message += format_close(code, reason)
super().__init__(message)
class InvalidURI(Exception):
"""
Exception raised when an URI isn't a valid websocket URI.
"""
class PayloadTooBig(Exception):
"""
Exception raised when a frame's payload exceeds the maximum size.
"""
class WebSocketProtocolError(Exception):
"""
Internal exception raised when the remote side breaks the protocol.
"""
| [
"[email protected]"
] | |
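A small sketch of how these exception classes compose a human-readable close description; it uses only names defined in the module above:

```python
# sketch: format_close and ConnectionClosed in action
print(format_close(1000, "bye"))    # code = 1000 (OK), reason = bye
print(format_close(4001, ""))       # code = 4001 (private use), no reason

try:
    raise ConnectionClosed(1009, "frame exceeded limit")
except ConnectionClosed as exc:
    print(exc.code, exc.reason)     # 1009 frame exceeded limit
    print(exc)                      # WebSocket connection is closed: code = 1009 (message too big), ...
```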
bdf28fd0c02c0410299165e2313553ae48a9f9ea | b92417413ec5b05ca25695de55934ce7072a0f0a | /test/test_v1_role.py | 8276850e9eb6f7e6038ba217a2d6777c01ea27fd | [
"Apache-2.0"
] | permissive | detiber/lib_openshift | be1f0f1b3eec62c9bbf50a3fcea61303a870c112 | efea21ce6f67e3d48885c03ae22978c576c0b87d | refs/heads/master | 2021-01-18T04:12:00.820052 | 2016-10-04T03:20:43 | 2016-10-04T03:20:43 | 63,102,761 | 0 | 0 | null | 2016-07-11T21:15:36 | 2016-07-11T21:15:36 | null | UTF-8 | Python | false | false | 1,212 | py | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_role import V1Role
class TestV1Role(unittest.TestCase):
""" V1Role unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Role(self):
"""
Test V1Role
"""
model = lib_openshift.models.v1_role.V1Role()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5dacf336171c8318f8a1a9bd3b0984427bad0db2 | 6285a6f1357a8c4104edbb1f9ece10a27d4be344 | /lib/modeling/backbone/resnet.py | f5c5600b4d372bc93756faa06bb3c08ae0137669 | [] | no_license | cumtchenchang/descriptor-space | f9af90a12c484f519a5cbd54985fe112e063332e | 0e7d0b15785039f7d8ecf3bc96db746b1ccf3465 | refs/heads/master | 2020-07-17T13:57:11.027855 | 2019-08-20T13:54:01 | 2019-08-20T13:54:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,919 | py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import numpy as np
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
padding = dilation
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, cfg=None):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
end_layer = int(cfg.MODEL.BACKBONE.STOP_DOWNSAMPLING[1:]) - 1
if end_layer <= 1:
raise ValueError("The stride and dilation before layer 2 should not be revised")
start_layer = 5
if cfg.MODEL.BACKBONE.START_DOWNSAMPLING:
start_layer = int(cfg.MODEL.BACKBONE.START_DOWNSAMPLING[1:]) - 1
strides = [1, 2, 2, 2]
for i in range(end_layer-1, start_layer-1):
strides[i] = 1
receptive = np.array([1, 2, 4, 8])
dilations = receptive // np.cumprod(strides)
self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class SingleResNet(nn.Module):
def __init__(self, cfg):
super(SingleResNet, self).__init__()
self.cfg = cfg
conv_body = cfg.MODEL.BACKBONE.CONV_BODY
ind = conv_body.rfind('-')
resnet = _RESNETS[conv_body[:ind]](pretrained=True, cfg=cfg)
self.stage = int(conv_body[ind+2:])
resnet_module_list = [
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1,
resnet.layer2,
resnet.layer3,
resnet.layer4
]
self.model = nn.Sequential(*resnet_module_list[:3+self.stage])
def forward(self, x):
return self.model(x)
class MultiResNet(nn.Module):
def __init__(self, cfg):
super(MultiResNet, self).__init__()
self.cfg = cfg
conv_body = cfg.MODEL.BACKBONE.CONV_BODY
ind = conv_body.rfind('-')
resnet = _RESNETS[conv_body[:ind]](pretrained=True, cfg=cfg)
self.stage = int(conv_body[ind+2:])
resnet_module_list = [
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1,
resnet.layer2,
resnet.layer3,
resnet.layer4
]
self.model = nn.Sequential(*resnet_module_list[:3+self.stage])
def forward(self, x):
feats = {}
x = self.model[:4](x)
feats['C1'] = x
for i in range(1, self.stage):
x = self.model[i + 3](x)
feats['C' + str(i+1)] = x
return feats
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet18"], model_dir=kwargs["cfg"].MODEL_DIR))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet34"], model_dir=kwargs["cfg"].MODEL_DIR))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet50"], model_dir=kwargs["cfg"].MODEL_DIR))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet101"], model_dir=kwargs["cfg"].MODEL_DIR))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls["resnet152"], model_dir=kwargs["cfg"].MODEL_DIR))
return model
_RESNETS = {
'R-18': resnet18,
'Multi-R-18': resnet18,
'R-34': resnet34,
'R-50': resnet50,
'R-101': resnet101,
'R-152': resnet152,
}
| [
"[email protected]"
] | |
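The interplay of `START_DOWNSAMPLING`/`STOP_DOWNSAMPLING` with strides and dilations in `ResNet.__init__` is easiest to see numerically. A standalone sketch of that bookkeeping, using a hypothetical config that stops downsampling at C4 (layer3 and layer4 keep stride 1, and dilation compensates to preserve the receptive field):

```python
import numpy as np

# mirror of the stride/dilation computation in ResNet.__init__
end_layer = 4 - 1        # hypothetical STOP_DOWNSAMPLING = "C4"
start_layer = 5          # no START_DOWNSAMPLING set

strides = [1, 2, 2, 2]
for i in range(end_layer - 1, start_layer - 1):
    strides[i] = 1       # disable striding from layer3 onward

receptive = np.array([1, 2, 4, 8])
dilations = receptive // np.cumprod(strides)

print(strides)             # [1, 2, 1, 1]
print(dilations.tolist())  # [1, 1, 2, 4] -> dilation makes up for the removed stride
```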
d9b8eb05b29632fdf14022991b75fcc4898142aa | 76c8a2593316a74078e5ebe3c280d393b058ff67 | /vai/commands/JoinWithNextLineCommand.py | c009849009712410c2f67e842d8f74c84019c1ef | [] | no_license | gavd89/vai | b7f746c3ba31397e8d85f477af9b9b71d01795fb | afa3a31b74ee81f9be8ab2c06cd8bdaebae1baad | refs/heads/master | 2021-01-16T22:04:05.131998 | 2014-10-31T22:35:37 | 2014-10-31T22:35:37 | 26,130,434 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | from .BufferCommand import BufferCommand
from .CommandResult import CommandResult
from ..models.TextDocument import LineMeta
class JoinWithNextLineCommand(BufferCommand):
def execute(self):
cursor = self._cursor
document = self._document
pos = cursor.pos
if pos[0] == document.numLines():
return CommandResult(success=False, info=None)
self.saveCursorPos()
line_meta = document.lineMeta(pos[0])
self.saveLineMemento(pos[0], BufferCommand.MEMENTO_REPLACE)
self.saveLineMemento(pos[0]+1, BufferCommand.MEMENTO_INSERT)
document.joinWithNextLine(pos[0])
if line_meta.get(LineMeta.Change) is None:
document.updateLineMeta(pos[0], {LineMeta.Change: "modified"})
return CommandResult(success=True, info=None)
| [
"[email protected]"
] | |
b78e36b1360b1dd9d552187653a755c3bb26c881 | 35ff4e124ea73cd2630ddf25dfe019b4b4e3f5d6 | /55_JumpGame/55_JumpGame_3.py | 01b5f0afae09de958c5980a9bd943ed3ceab4200 | [] | no_license | H-Cong/LeetCode | 0a2084a4845b5d7fac67c89bd72a2adf49f90c3d | d00993a88c6b34fcd79d0a6580fde5c523a2741d | refs/heads/master | 2023-03-19T15:22:00.971461 | 2021-03-11T00:33:00 | 2021-03-11T00:33:00 | 303,265,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | class Solution:
def canJump(self, nums: List[int]) -> bool:
'''
BackTracking
'''
return self.canJumpFromPosition(0, nums)
def canJumpFromPosition(self, position, nums):
if position == len(nums) - 1: return True
furthestJump = min(position + nums[position], len(nums) - 1)
for nextPosition in range(position + 1, furthestJump + 1):
if self.canJumpFromPosition(nextPosition, nums):
return True
return False
# TC: O(2^n)
# There are 2^n (upper bound) ways of jumping from the first position to the last,
# where n is the length of array nums.
# SC: O(n)
# Recursion requires additional memory for the stack frames.
| [
"[email protected]"
] | |
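The backtracking version above is exponential; the same problem also has a one-pass greedy solution that tracks the furthest reachable index. A sketch for comparison (standalone, outside the LeetCode class wrapper):

```python
from typing import List

def can_jump_greedy(nums: List[int]) -> bool:
    """Greedy alternative: O(n) time, O(1) space."""
    furthest = 0                      # furthest index reachable so far
    for i, step in enumerate(nums):
        if i > furthest:              # position i can never be reached
            return False
        furthest = max(furthest, i + step)
    return True

assert can_jump_greedy([2, 3, 1, 1, 4]) is True
assert can_jump_greedy([3, 2, 1, 0, 4]) is False
```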
ef301480c09e8bf0f702faabb05d320f96d1726c | 8b3ca44ee3d990233e74655b7131d616094f70c2 | /experiments/cross_validation/movielens_1M/gaussian_gaussian_wishart.py | 77b129a3b7aae4cbfca58736af054f4dfb1902a5 | [] | no_license | zshwuhan/BMF_Priors | 8b8c54271285a72d2085a56a9475c0756f375e67 | 6a600da1c41f1ccde2f2ba99298b40e68fb9910a | refs/heads/master | 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null | UTF-8 | Python | false | false | 1,730 | py | '''
Run nested cross-validation experiment on the MovieLens 1M dataset, with
the All Gaussian model (multivariate posterior) and Wishart prior.
'''
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/" # "/home/tab43/Documents/Projects/libraries/" #
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_gaussian_wishart import BMF_Gaussian_Gaussian_Wishart
from BMF_Priors.code.cross_validation.nested_matrix_cross_validation import MatrixNestedCrossValidation
from BMF_Priors.data.movielens.load_data import load_processed_movielens_1M
''' Settings BMF model. '''
method = BMF_Gaussian_Gaussian_Wishart
R, M = load_processed_movielens_1M()
hyperparameters = { 'alpha':1., 'beta':1., 'mu0':0., 'beta0':1., 'W0':1. }
train_config = {
'iterations' : 120,
'init' : 'random',
}
predict_config = {
'burn_in' : 100,
'thinning' : 1,
}
''' Settings nested cross-validation. '''
K_range = [12,13,14]
no_folds = 5
no_threads = 5
parallel = False
folder_results = './results/gaussian_gaussian_wishart/'
output_file = folder_results+'results.txt'
files_nested_performances = [folder_results+'fold_%s.txt'%(fold+1) for fold in range(no_folds)]
''' Construct the parameter search. '''
parameter_search = [{'K':K, 'hyperparameters':hyperparameters} for K in K_range]
''' Run the cross-validation framework. '''
nested_crossval = MatrixNestedCrossValidation(
method=method,
R=R,
M=M,
K=no_folds,
P=no_threads,
parameter_search=parameter_search,
train_config=train_config,
predict_config=predict_config,
file_performance=output_file,
files_nested_performances=files_nested_performances,
)
nested_crossval.run(parallel=parallel)
| [
"[email protected]"
] | |
d0a7aeff905f45c9098ea9c161be390f6f6400d6 | 00b24ff5ec169210b1b7cce53b621cbc0ee0fe40 | /migrations/versions/e812a221262e_initialized_database.py | 5944c31f6d72464d38c440607572aeca5fe9a83d | [] | no_license | carter3689/fakebook-march | 1242c052fa51826f56aeb187cfdf41e0464ca4f8 | 41c2c388e0f19d849eef4572a13fcdffb41d3de4 | refs/heads/main | 2023-05-04T02:59:00.245789 | 2021-05-18T16:28:38 | 2021-05-18T16:28:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | """initialized database
Revision ID: e812a221262e
Revises:
Create Date: 2021-04-26 11:24:10.910838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e812a221262e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=50), nullable=True),
sa.Column('image', sa.String(), nullable=True),
sa.Column('title', sa.String(), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('post')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
d2bda2941a3c280e45de65afc578c06a0a1341f7 | 95f9c734c4bf5de8e5d0adff9ac2cf0228df75ac | /django-pro/opweb/opweb/wsgi.py | afbe2f475b239ff7eb311f1c9c1e5d1dd89b1289 | [] | no_license | holen/Python | 7a996b13ff2224084397223879c380169d47ff8c | 506fff291d6e9c6f80c30a51cc3b77e9dd048468 | refs/heads/master | 2022-12-12T22:12:51.561716 | 2019-10-16T03:08:00 | 2019-10-16T03:08:00 | 14,278,665 | 1 | 0 | null | 2022-12-08T00:51:26 | 2013-11-10T15:29:59 | Python | UTF-8 | Python | false | false | 385 | py | """
WSGI config for opweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opweb.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"[email protected]"
] | |
b0e721c955feb492b1b48896ef1eeac252b62e25 | ee303308d85c28467a7dfe5300951d49a3866fb3 | /src/uvm/base/uvm_report_server.py | 07b247efdc4497b985ad27098590a5554ee788dc | [
"Apache-2.0"
] | permissive | tpoikela/uvm-python | 3a66a43100a2903f91e0bb73b84c07c1003f7763 | fc5f955701b2b56c1fddac195c70cb3ebb9139fe | refs/heads/master | 2023-05-02T05:08:00.792132 | 2023-04-24T16:07:14 | 2023-04-24T16:07:14 | 232,838,902 | 199 | 43 | Apache-2.0 | 2023-04-24T16:10:44 | 2020-01-09T15:22:26 | Python | UTF-8 | Python | false | false | 22,445 | py |
# from cocotb.utils import get_sim_time
from .uvm_object import UVMObject
from .uvm_object_globals import (UVM_BIN, UVM_COUNT, UVM_DEC, UVM_DISPLAY, UVM_ERROR, UVM_EXIT,
UVM_FATAL, UVM_INFO, UVM_LOG, UVM_LOW, UVM_MEDIUM, UVM_NONE,
UVM_NO_ACTION, UVM_RM_RECORD, UVM_SEVERITY_LEVELS, UVM_STOP,
UVM_WARNING)
from .uvm_globals import uvm_report_info, uvm_sim_time
from .uvm_pool import UVMPool
from .uvm_global_vars import uvm_default_printer
from .sv import sv
from ..macros.uvm_message_defines import uvm_info
from .uvm_tr_database import UVMTrDatabase, UVMTextTrDatabase
from typing import List, Any, Callable
def ename(sever) -> str:
"""
Converts given severity level into string.
Args:
sever (int): Severity level.
Returns:
str: Severity as string.
Raises:
Exception
"""
if isinstance(sever, str):
raise Exception("str was given to ename(). Expected int. Got: " + sever)
if sever == UVM_INFO:
return "UVM_INFO"
if sever == UVM_ERROR:
return "UVM_ERROR"
if sever == UVM_FATAL:
return "UVM_FATAL"
if sever == UVM_WARNING:
return "UVM_WARNING"
return "UNKNOWN_SEVERITY: {}".format(sever)
class UVMReportServer(UVMObject):
"""
UVMReportServer is a global server that processes all of the reports
generated by a uvm_report_handler.
The `UVMReportServer` is an abstract class which declares many of its methods
as ~pure virtual~. The UVM uses the <uvm_default_report_server> class
as its default report server implementation.
"""
def __init__(self, name="base"):
UVMObject.__init__(self, name)
self.m_quit_count = 0
self.m_max_quit_count = 0
self.max_quit_overridable = True
self.m_severity_count = UVMPool()
self.m_id_count = UVMPool()
self.enable_report_id_count_summary = True
self.record_all_messages = False
self.show_verbosity = False
self.show_terminator = False
self.m_message_db: UVMTrDatabase = UVMTextTrDatabase() # uvm_tr_database
self.m_streams = {}
self.reset_quit_count()
self.reset_severity_counts()
self.set_max_quit_count(0)
self.print_on_closed_file = True
self.logger = print # By default, use print to emit the messages
def get_type_name(self) -> str:
return "uvm_report_server"
@classmethod
def set_server(cls, server):
cs = get_cs()
#server.copy(cs.get_report_server())
cs.set_report_server(server)
@classmethod
def get_server(cls) -> 'UVMReportServer':
cs = get_cs()
return cs.get_report_server()
def set_logger(self, logger: Callable):
"""
Sets the logger function used to print the messages. Default is Python's
built-in print.
Args:
    logger (func): Logging function to use.
"""
self.logger = logger
# Function: print
#
# The uvm_report_server implements the `UVMObject.do_print()` such that
# ~print~ method provides UVM printer formatted output
# of the current configuration. A snippet of example output is shown here::
#
# uvm_report_server uvm_report_server - @13
# quit_count int 32 'd0
# max_quit_count int 32 'd5
# max_quit_overridable bit 1 'b1
# severity_count severity counts 4 -
# [UVM_INFO] integral 32 'd4
# [UVM_WARNING] integral 32 'd2
# [UVM_ERROR] integral 32 'd50
# [UVM_FATAL] integral 32 'd10
# id_count id counts 4 -
# [ID1] integral 32 'd1
# [ID2] integral 32 'd2
# [RNTST] integral 32 'd1
# enable_report_id_count_summary bit 1 'b1
# record_all_messages bit 1 `b0
# show_verbosity bit 1 `b0
# show_terminator bit 1 `b0
def do_print(self, printer):
"""
Print to show report server state
Args:
printer (UVMPrinter):
"""
l_severity_count_index = 0
l_id_count_index = ""
printer.print_int("quit_count", self.m_quit_count, sv.bits(self.m_quit_count), UVM_DEC,
".", "int")
printer.print_int("max_quit_count", self.m_max_quit_count,
sv.bits(self.m_max_quit_count), UVM_DEC, ".", "int")
printer.print_int("max_quit_overridable", self.max_quit_overridable,
sv.bits(self.max_quit_overridable), UVM_BIN, ".", "bit")
if self.m_severity_count.has_first():
l_severity_count_index = self.m_severity_count.first()
sev_count = self.m_severity_count.num()
printer.print_array_header("severity_count",sev_count,"severity counts")
ok = True
while ok:
printer.print_int("[{}]".format(ename(l_severity_count_index)),
self.m_severity_count[l_severity_count_index], 32, UVM_DEC)
ok = self.m_severity_count.has_next()
if ok:
l_severity_count_index = self.m_severity_count.next()
printer.print_array_footer()
if self.m_id_count.has_first():
l_id_count_index = self.m_id_count.first()
printer.print_array_header("id_count",self.m_id_count.num(),"id counts")
ok = True
while ok:
printer.print_int("[{}]".format(l_id_count_index),
self.m_id_count[l_id_count_index], 32, UVM_DEC)
ok = self.m_id_count.has_next()
if ok:
l_id_count_index = self.m_id_count.next()
printer.print_array_footer()
printer.print_int("enable_report_id_count_summary", self.enable_report_id_count_summary,
sv.bits(self.enable_report_id_count_summary), UVM_BIN, ".", "bit")
printer.print_int("record_all_messages", self.record_all_messages,
sv.bits(self.record_all_messages), UVM_BIN, ".", "bit")
printer.print_int("show_verbosity", self.show_verbosity,
sv.bits(self.show_verbosity), UVM_BIN, ".", "bit")
printer.print_int("show_terminator", self.show_terminator,
sv.bits(self.show_terminator), UVM_BIN, ".", "bit")
#----------------------------------------------------------------------------
# Group: Quit Count
#----------------------------------------------------------------------------
def get_max_quit_count(self):
"""
Get the maximum number of COUNT actions that can be tolerated
before a UVM_EXIT action is taken. The default is 0, which specifies
no maximum.
Returns:
int: Max quit count allowed for the server.
"""
return self.m_max_quit_count
# Function: set_max_quit_count
#
# Get or set the maximum number of COUNT actions that can be tolerated
# before a UVM_EXIT action is taken. The default is 0, which specifies
# no maximum.
def set_max_quit_count(self, count, overridable=True):
if self.max_quit_overridable is False:
uvm_report_info("NOMAXQUITOVR",
"The max quit count setting of {} is not overridable to {} due to a previous setting."
.format(self.m_max_quit_count, count), UVM_NONE)
return
self.max_quit_overridable = overridable
if count < 0:
self.m_max_quit_count = 0
else:
self.m_max_quit_count = count
def get_quit_count(self):
"""
Function: get_quit_count
Returns:
int: Quit count set for this report server.
"""
return self.m_quit_count
def set_quit_count(self, quit_count):
"""
Args:
quit_count (int): Desired quit count for this server.
"""
if quit_count < 0:
self.m_quit_count = 0
else:
self.m_quit_count = quit_count
def incr_quit_count(self):
"""
Increment quit count by one.
"""
self.m_quit_count += 1
def reset_quit_count(self):
"""
Set, get, increment, or reset to 0 the quit count, i.e., the number of
`UVM_COUNT` actions issued.
"""
self.m_quit_count = 0
def is_quit_count_reached(self):
"""
If is_quit_count_reached returns 1, then the quit counter has reached
the maximum.
Returns:
bool: True is maximum quit count reached, False otherwise.
"""
return self.m_quit_count >= self.m_max_quit_count
#----------------------------------------------------------------------------
# Group: Severity Count
#----------------------------------------------------------------------------
def get_severity_count(self, severity):
"""
Returns number of messages reported for given severity.
Args:
severity (int): Severity level
Returns:
int: Number of messages reported for given severity.
"""
if self.m_severity_count.exists(severity):
return self.m_severity_count.get(severity)
return 0
# Function: set_severity_count
def set_severity_count(self, severity, count):
val = count
if count < 0:
val = 0
self.m_severity_count.add(severity, val)
# Function: incr_severity_count
def incr_severity_count(self, severity):
if self.m_severity_count.exists(severity):
new_count = self.m_severity_count.get(severity) + 1
self.m_severity_count.add(severity, new_count)
else:
self.m_severity_count.add(severity, 1)
def reset_severity_counts(self):
"""
Function: reset_severity_counts
Set, get, or increment the counter for the given severity, or reset
all severity counters to 0.
"""
for s in UVM_SEVERITY_LEVELS:
self.m_severity_count.add(s, 0)
#----------------------------------------------------------------------------
# Group: id Count
#----------------------------------------------------------------------------
# Function: get_id_count
def get_id_count(self, id):
if self.m_id_count.exists(id):
return self.m_id_count.get(id)
return 0
# Function: set_id_count
def set_id_count(self, id, count):
val = count
if count < 0:
val = 0
self.m_id_count.add(id, val)
# Function: incr_id_count
#
# Set, get, or increment the counter for reports with the given id.
def incr_id_count(self, id):
if self.m_id_count.exists(id):
val = self.m_id_count.get(id)
self.m_id_count.add(id, val + 1)
else:
self.m_id_count.add(id, 1)
#----------------------------------------------------------------------------
# Group: message recording
#
# The ~uvm_default_report_server~ will record messages into the message
# database, using one transaction per message, and one stream per report
# object/handler pair.
#
#----------------------------------------------------------------------------
def set_message_database(self, database: UVMTrDatabase):
"""
Function: set_message_database
sets the `UVMTrDatabase` used for recording messages
Args:
database (UVMTrDatabase):
"""
self.m_message_db = database
def get_message_database(self) -> UVMTrDatabase:
"""
Function: get_message_database
returns the `uvm_tr_database` used for recording messages
Returns:
UVMTrDatabase: Message database.
"""
return self.m_message_db
def get_severity_set(self, q: List[Any]) -> None:
    # collect every severity seen so far into q
    for sev in self.m_severity_count.keys():
        q.append(sev)
def get_id_set(self, q: List[Any]) -> None:
    # collect every message id seen so far into q
    for id in self.m_id_count.keys():
        q.append(id)
def f_display(self, file, _str) -> None:
"""
Sends the string ``_str`` to the command line if ``file`` is 0, and to
the file(s) specified by ``file`` otherwise.
Args:
file:
_str:
"""
if file == 0:
self.logger(_str)
# print(_str)
else:
if not file.closed:
file.write(_str + "\n")
else:
if self.print_on_closed_file:
self.logger('UVM_WARNING. File already closed for msg ' + _str)
def process_report_message(self, report_message) -> None:
l_report_handler = report_message.get_report_handler()
#process p = process::self()
report_ok = True
# Set the report server for this message
report_message.set_report_server(self)
if report_ok is True:
from .uvm_report_catcher import UVMReportCatcher
report_ok = UVMReportCatcher.process_all_report_catchers(report_message)
if report_message.get_action() == UVM_NO_ACTION:
report_ok = False
if report_ok:
m = ""
cs = get_cs()
# give the global server a chance to intercept the calls
svr = cs.get_report_server()
# no need to compose when neither UVM_DISPLAY nor UVM_LOG is set
if report_message.get_action() & (UVM_LOG | UVM_DISPLAY):
m = svr.compose_report_message(report_message)
svr.execute_report_message(report_message, m)
#----------------------------------------------------------------------------
# Group: Message Processing
#----------------------------------------------------------------------------
def execute_report_message(self, report_message, composed_message) -> None:
"""
Processes the provided message per the actions contained within.
Expert users can overload this method to customize action processing.
Args:
report_message (UVMReportMessage):
composed_message (str): Formatted message string
"""
#process p = process::self()
# Update counts
self.incr_severity_count(report_message.get_severity())
self.incr_id_count(report_message.get_id())
if self.record_all_messages is True:
report_message.set_action(report_message.get_action() | UVM_RM_RECORD)
# UVM_RM_RECORD action
if report_message.get_action() & UVM_RM_RECORD:
stream = None # uvm_tr_stream stream
ro = report_message.get_report_object()
rh = report_message.get_report_handler()
# Check for a pre-existing stream for this report object/handler pair
if ro.get_name() in self.m_streams and rh.get_name() in self.m_streams[ro.get_name()]:
    stream = self.m_streams[ro.get_name()][rh.get_name()]
# If no pre-existing stream (or for some reason pre-existing stream was ~null~)
if stream is None:
# Grab the database
db = self.get_message_database()
# If database is ~null~, use the default database
if db is None:
cs = get_cs()
db = cs.get_default_tr_database()
if db is not None:
# Open the stream. Name=report object name, scope=report handler name, type=MESSAGES
stream = db.open_stream(ro.get_name(), rh.get_name(), "MESSAGES")
# Save off the opened stream
self.m_streams.setdefault(ro.get_name(), {})[rh.get_name()] = stream
if stream is not None:
recorder = stream.open_recorder(report_message.get_name(), None,report_message.get_type_name())
if recorder is not None:
report_message.record(recorder)
recorder.free()
# DISPLAY action
if report_message.get_action() & UVM_DISPLAY:
self.logger(composed_message)
# LOG action
# if log is set we need to send to the file but not resend to the
# display. So, we need to mask off stdout for an mcd or we need
# to ignore the stdout file handle for a file handle.
if report_message.get_action() & UVM_LOG:
if report_message.get_file() == 0 or report_message.get_file() != 0x80000001: #ignore stdout handle
tmp_file = report_message.get_file()
#if report_message.get_file() & 0x80000000 == 0: # is an mcd so mask off stdout
# tmp_file = report_message.get_file() & 0xfffffffe
self.f_display(tmp_file, composed_message)
# Process the UVM_COUNT action
if report_message.get_action() & UVM_COUNT:
if self.get_max_quit_count() != 0:
self.incr_quit_count()
# If quit count is reached, add the UVM_EXIT action.
if self.is_quit_count_reached():
report_message.set_action(report_message.get_action() | UVM_EXIT)
# Process the UVM_EXIT action
if report_message.get_action() & UVM_EXIT:
cs = get_cs()
l_root = cs.get_root()
l_root.die()
# Process the UVM_STOP action
if report_message.get_action() & UVM_STOP:
raise Exception("$stop from uvm_report_server, msg: " +
report_message.sprint())
def compose_report_message(self, report_message, report_object_name="") -> str:
"""
Constructs the actual string sent to the file or command line
from the severity, component name, report id, and the message itself.
Expert users can overload this method to customize report formatting.
Args:
report_message (UVMReportMessage):
report_object_name (UVMReportObject):
Returns:
str: Composed message as string.
"""
sev_string = ""
l_severity = UVM_INFO
l_verbosity = UVM_MEDIUM
filename_line_string = ""
time_str = ""
line_str = ""
context_str = ""
verbosity_str = ""
terminator_str = ""
msg_body_str = ""
el_container = None
prefix = ""
l_report_handler = None
l_severity = report_message.get_severity()
sev_string = ename(l_severity)
if report_message.get_filename() != "":
line_str = str(report_message.get_line())
filename_line_string = report_message.get_filename() + "(" + line_str + ") "
# Make definable in terms of units.
#$swrite(time_str, "%0t", $time)
#time_str = get_sim_time('ns') TODO
time_str = str(uvm_sim_time('NS')) + 'NS'
if report_message.get_context() != "":
context_str = "@@" + report_message.get_context()
if self.show_verbosity is True:
verb = report_message.get_verbosity()
verbosity_str = "(" + verb + ")"
if self.show_terminator is True:
terminator_str = " -" + sev_string
el_container = report_message.get_element_container()
if el_container.size() == 0:
msg_body_str = report_message.get_message()
else:
prefix = uvm_default_printer.knobs.prefix
uvm_default_printer.knobs.prefix = " +"
msg_body_str = report_message.get_message() + "\n" + el_container.sprint()
uvm_default_printer.knobs.prefix = prefix
if report_object_name == "":
l_report_handler = report_message.get_report_handler()
if l_report_handler is not None:
report_object_name = l_report_handler.get_full_name()
else:
report_object_name = "NO_REPORT_OBJECT"
result = (sev_string + verbosity_str + " " + filename_line_string
+ "@ " + time_str + ": " + report_object_name + context_str
+ " [" + report_message.get_id() + "] " + msg_body_str + terminator_str)
return result
def report_summarize(self, file=0) -> None:
"""
Outputs statistical information on the reports issued by this central
report server. This information will be sent to the command line if
~file~ is 0, or to the file descriptor ~file~ if it is not 0.
The `UVMRoot.run_test` method in `UVMRoot` calls this method.
"""
rpt = self.get_summary_string()
uvm_info("UVM/REPORT/SERVER", rpt, UVM_LOW)
def get_summary_string(self) -> str:
"""
Returns the statistical information on the reports issued by this central report
server as multi-line string.
Returns:
str: End of simulation summary.
"""
id = ""
q = []
from .uvm_report_catcher import UVMReportCatcher
UVMReportCatcher.summarize()
q.append("\n--- UVM Report Summary ---\n\n")
if self.m_max_quit_count != 0:
if self.m_quit_count >= self.m_max_quit_count:
q.append("Quit count reached!\n")
q.append("Quit count : {} of {}\n".format(self.m_quit_count,
self.m_max_quit_count))
q.append("** Report counts by severity\n")
for s in self.m_severity_count.keys():
q.append("{} : {}\n".format(ename(s), self.m_severity_count.get(s)))
if self.enable_report_id_count_summary is True:
q.append("** Report counts by id\n")
for id in self.m_id_count.keys():
q.append("[{}] {}\n".format(id, self.m_id_count.get(id)))
return "".join(q)
def get_cs():
from .uvm_coreservice import UVMCoreService
return UVMCoreService.get()
| [
"[email protected]"
] | |
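Two extension points stand out in `UVMReportServer`: installing a process-wide server via the core service (`set_server`/`get_server`) and redirecting output via `set_logger`. A hedged sketch of both, assuming the import path follows the `src/uvm/base/...` layout of this repository; the logging setup is illustrative:

```python
# sketch: route all report output through Python's logging module
import logging

from uvm.base.uvm_report_server import UVMReportServer  # assumed import path

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("uvm")

server = UVMReportServer("my_server")
server.set_logger(log.info)        # every composed message now goes through logging
server.set_max_quit_count(10)      # exit after 10 UVM_COUNT actions

UVMReportServer.set_server(server)             # install into the UVM core service
print(UVMReportServer.get_server() is server)  # expected: True
```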
15b35b4d0ed83d0b99c75f0e25604fb40d79f538 | ea6b3b74c8f1ff9333c5d4b06a0e4dd9bbdb3bba | /tests/rpc/test_reflect_service.py | b7a206e547357b2ef8b7398477b04497c4742ae9 | [
"MIT"
] | permissive | sgalkina/venom | d495d296a388afcb25525491bbbe590bfd258a05 | e372ab9002e71ba4e2422aabd02143e4f1247dba | refs/heads/master | 2021-01-23T03:27:17.239289 | 2017-03-24T15:05:56 | 2017-03-24T15:05:56 | 86,077,951 | 0 | 0 | null | 2017-03-24T14:40:46 | 2017-03-24T14:40:46 | null | UTF-8 | Python | false | false | 557 | py | from unittest import TestCase
from venom.rpc import Service, Venom
from venom.rpc.reflect.service import ReflectService
class ReflectServiceTestCase(TestCase):
def test_service_registration(self):
class BeforeService(Service):
pass
class AfterService(Service):
pass
venom = Venom()
venom.add(BeforeService)
venom.add(ReflectService)
venom.add(AfterService)
self.assertEqual(ReflectService.__manager__.reflect.services, {BeforeService, AfterService, ReflectService})
| [
"[email protected]"
] | |
a19b592b9058d5945dee87f774bc4ee913bbecf1 | 0b5f2442b222da2895cdad06913c3687162f06bb | /pyclustering/container/__init__.py | 27e87524cc9610c785f192f53863d1b4e7d5a005 | [] | no_license | Kinddle-tick/ML_clustering | a765fadde581392de098227b0ee4a9b3572ef24f | 27f9887cb383d0d1ea0a4a42788eddc2f4c85c67 | refs/heads/master | 2023-03-14T18:15:08.350604 | 2021-03-23T07:16:51 | 2021-03-23T07:16:51 | 350,600,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | """!
@brief pyclustering module of data structures (containers).
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
""" | [
"[email protected]"
] | |
66ba21a633e7441cc8a26fce40999953920a1510 | 37c243e2f0aab70cbf38013d1d91bfc3a83f7972 | /pp7TeV/HeavyIonsAnalysis/JetAnalysis/python/jets/akPu3PFJetSequence_pp_mc_bTag_cff.py | 9bd3111ca859c1bbd91ad38579946f0d50b47212 | [] | no_license | maoyx/CMSWork | 82f37256833cbe4c60cb8df0b4eb68ceb12b65e7 | 501456f3f3e0f11e2f628b40e4d91e29668766d5 | refs/heads/master | 2021-01-01T18:47:55.157534 | 2015-03-12T03:47:15 | 2015-03-12T03:47:15 | 10,951,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,707 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
akPu3PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu3PFJets"),
matched = cms.InputTag("ak3HiGenJets")
)
akPu3PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPu3PFJets")
)
akPu3PFcorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu3PFJets"),
payload = "AKPu3PF_generalTracks"
)
akPu3PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPu3CaloJets'))
akPu3PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak3HiGenJets'))
akPu3PFbTagger = bTaggers("akPu3PF")
#create objects locally since they dont load properly otherwise
akPu3PFmatch = akPu3PFbTagger.match
akPu3PFparton = akPu3PFbTagger.parton
akPu3PFPatJetFlavourAssociation = akPu3PFbTagger.PatJetFlavourAssociation
akPu3PFJetTracksAssociatorAtVertex = akPu3PFbTagger.JetTracksAssociatorAtVertex
akPu3PFSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPu3PFSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPu3PFCombinedSecondaryVertexBJetTags = akPu3PFbTagger.CombinedSecondaryVertexBJetTags
akPu3PFCombinedSecondaryVertexMVABJetTags = akPu3PFbTagger.CombinedSecondaryVertexMVABJetTags
akPu3PFJetBProbabilityBJetTags = akPu3PFbTagger.JetBProbabilityBJetTags
akPu3PFSoftMuonByPtBJetTags = akPu3PFbTagger.SoftMuonByPtBJetTags
akPu3PFSoftMuonByIP3dBJetTags = akPu3PFbTagger.SoftMuonByIP3dBJetTags
akPu3PFTrackCountingHighEffBJetTags = akPu3PFbTagger.TrackCountingHighEffBJetTags
akPu3PFTrackCountingHighPurBJetTags = akPu3PFbTagger.TrackCountingHighPurBJetTags
akPu3PFPatJetPartonAssociation = akPu3PFbTagger.PatJetPartonAssociation
akPu3PFImpactParameterTagInfos = akPu3PFbTagger.ImpactParameterTagInfos
akPu3PFJetProbabilityBJetTags = akPu3PFbTagger.JetProbabilityBJetTags
akPu3PFPositiveOnlyJetProbabilityJetTags = akPu3PFbTagger.PositiveOnlyJetProbabilityJetTags
akPu3PFNegativeOnlyJetProbabilityJetTags = akPu3PFbTagger.NegativeOnlyJetProbabilityJetTags
akPu3PFNegativeTrackCountingHighEffJetTags = akPu3PFbTagger.NegativeTrackCountingHighEffJetTags
akPu3PFNegativeTrackCountingHighPur = akPu3PFbTagger.NegativeTrackCountingHighPur
akPu3PFNegativeOnlyJetBProbabilityJetTags = akPu3PFbTagger.NegativeOnlyJetBProbabilityJetTags
akPu3PFPositiveOnlyJetBProbabilityJetTags = akPu3PFbTagger.PositiveOnlyJetBProbabilityJetTags
akPu3PFSecondaryVertexTagInfos = akPu3PFbTagger.SecondaryVertexTagInfos
akPu3PFSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPu3PFSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPu3PFCombinedSecondaryVertexBJetTags = akPu3PFbTagger.CombinedSecondaryVertexBJetTags
akPu3PFCombinedSecondaryVertexMVABJetTags = akPu3PFbTagger.CombinedSecondaryVertexMVABJetTags
akPu3PFSecondaryVertexNegativeTagInfos = akPu3PFbTagger.SecondaryVertexNegativeTagInfos
akPu3PFSimpleSecondaryVertexNegativeHighEffBJetTags = akPu3PFbTagger.SimpleSecondaryVertexNegativeHighEffBJetTags
akPu3PFSimpleSecondaryVertexNegativeHighPurBJetTags = akPu3PFbTagger.SimpleSecondaryVertexNegativeHighPurBJetTags
akPu3PFCombinedSecondaryVertexNegativeBJetTags = akPu3PFbTagger.CombinedSecondaryVertexNegativeBJetTags
akPu3PFCombinedSecondaryVertexPositiveBJetTags = akPu3PFbTagger.CombinedSecondaryVertexPositiveBJetTags
akPu3PFSoftMuonTagInfos = akPu3PFbTagger.SoftMuonTagInfos
akPu3PFSoftMuonBJetTags = akPu3PFbTagger.SoftMuonBJetTags
akPu3PFSoftMuonByIP3dBJetTags = akPu3PFbTagger.SoftMuonByIP3dBJetTags
akPu3PFSoftMuonByPtBJetTags = akPu3PFbTagger.SoftMuonByPtBJetTags
akPu3PFNegativeSoftMuonByPtBJetTags = akPu3PFbTagger.NegativeSoftMuonByPtBJetTags
akPu3PFPositiveSoftMuonByPtBJetTags = akPu3PFbTagger.PositiveSoftMuonByPtBJetTags
akPu3PFPatJetFlavourId = cms.Sequence(akPu3PFPatJetPartonAssociation*akPu3PFPatJetFlavourAssociation)
akPu3PFJetBtaggingIP = cms.Sequence(akPu3PFImpactParameterTagInfos *
(akPu3PFTrackCountingHighEffBJetTags +
akPu3PFTrackCountingHighPurBJetTags +
akPu3PFJetProbabilityBJetTags +
akPu3PFJetBProbabilityBJetTags +
akPu3PFPositiveOnlyJetProbabilityJetTags +
akPu3PFNegativeOnlyJetProbabilityJetTags +
akPu3PFNegativeTrackCountingHighEffJetTags +
akPu3PFNegativeTrackCountingHighPur +
akPu3PFNegativeOnlyJetBProbabilityJetTags +
akPu3PFPositiveOnlyJetBProbabilityJetTags
)
)
akPu3PFJetBtaggingSV = cms.Sequence(akPu3PFImpactParameterTagInfos
*
akPu3PFSecondaryVertexTagInfos
* (akPu3PFSimpleSecondaryVertexHighEffBJetTags
+
akPu3PFSimpleSecondaryVertexHighPurBJetTags
+
akPu3PFCombinedSecondaryVertexBJetTags
+
akPu3PFCombinedSecondaryVertexMVABJetTags
)
)
akPu3PFJetBtaggingNegSV = cms.Sequence(akPu3PFImpactParameterTagInfos
*
akPu3PFSecondaryVertexNegativeTagInfos
* (akPu3PFSimpleSecondaryVertexNegativeHighEffBJetTags
+
akPu3PFSimpleSecondaryVertexNegativeHighPurBJetTags
+
akPu3PFCombinedSecondaryVertexNegativeBJetTags
+
akPu3PFCombinedSecondaryVertexPositiveBJetTags
)
)
akPu3PFJetBtaggingMu = cms.Sequence(akPu3PFSoftMuonTagInfos * (akPu3PFSoftMuonBJetTags
+
akPu3PFSoftMuonByIP3dBJetTags
+
akPu3PFSoftMuonByPtBJetTags
+
akPu3PFNegativeSoftMuonByPtBJetTags
+
akPu3PFPositiveSoftMuonByPtBJetTags
)
)
akPu3PFJetBtagging = cms.Sequence(akPu3PFJetBtaggingIP
*akPu3PFJetBtaggingSV
*akPu3PFJetBtaggingNegSV
*akPu3PFJetBtaggingMu
)
akPu3PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPu3PFJets"),
genJetMatch = cms.InputTag("akPu3PFmatch"),
genPartonMatch = cms.InputTag("akPu3PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu3PFcorr")),
JetPartonMapSource = cms.InputTag("akPu3PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPu3PFJetTracksAssociatorAtVertex"),
discriminatorSources = cms.VInputTag(cms.InputTag("akPu3PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPu3PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPu3PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPu3PFCombinedSecondaryVertexMVABJetTags"),
cms.InputTag("akPu3PFJetBProbabilityBJetTags"),
cms.InputTag("akPu3PFJetProbabilityBJetTags"),
cms.InputTag("akPu3PFSoftMuonByPtBJetTags"),
cms.InputTag("akPu3PFSoftMuonByIP3dBJetTags"),
cms.InputTag("akPu3PFTrackCountingHighEffBJetTags"),
cms.InputTag("akPu3PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPu3PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = True,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = True
)
akPu3PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu3PFpatJetsWithBtagging"),
genjetTag = 'ak3HiGenJets',
rParam = 0.3,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(True),
bTagJetName = cms.untracked.string("akPu3PF"),
genPtMin = cms.untracked.double(15),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL')
)
akPu3PFJetSequence_mc = cms.Sequence(
akPu3PFclean
*
akPu3PFmatch
*
akPu3PFparton
*
akPu3PFcorr
*
akPu3PFJetID
*
akPu3PFPatJetFlavourId
*
akPu3PFJetTracksAssociatorAtVertex
*
akPu3PFJetBtagging
*
akPu3PFpatJetsWithBtagging
*
akPu3PFJetAnalyzer
)
akPu3PFJetSequence_data = cms.Sequence(akPu3PFcorr
*
akPu3PFJetTracksAssociatorAtVertex
*
akPu3PFJetBtagging
*
akPu3PFpatJetsWithBtagging
*
akPu3PFJetAnalyzer
)
akPu3PFJetSequence_jec = akPu3PFJetSequence_mc
akPu3PFJetSequence_mix = akPu3PFJetSequence_mc
akPu3PFJetSequence = cms.Sequence(akPu3PFJetSequence_mc)
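# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated config): a top-level heavy-ion
# configuration would typically load this fragment and schedule one of the
# sequences. The module and process names below are illustrative assumptions.
#
#   import FWCore.ParameterSet.Config as cms
#   process = cms.Process("JETS")
#   process.load("HeavyIonsAnalysis.JetAnalysis.akPu3PFJetSequence_cff")
#   process.p = cms.Path(process.akPu3PFJetSequence)
# ---------------------------------------------------------------------------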
| [
"[email protected]"
] | |
05932b5eb4efff7df2c4efaadaa8037452a9e61d | cd90bbc775cbce9a7e0bc46cbb9437e3961e587f | /misc/advent/2017/5/e.py | 4d998c22b9ab5cb25fc7a904a5b71e05e17ea209 | [] | no_license | llimllib/personal_code | 7b3f0483589e2928bf994184e3413f4b887e1f0c | 4d4662d53e0ac293dea8a4208ccca4a1f272e64a | refs/heads/master | 2023-09-05T04:02:05.075388 | 2023-09-01T12:34:09 | 2023-09-01T12:34:09 | 77,958 | 9 | 16 | null | 2023-08-16T13:54:39 | 2008-11-19T02:04:46 | HTML | UTF-8 | Python | false | false | 571 | py | def run(cmds):
    # AoC 2017 day 5, part 2: follow the jump offsets, mutating each offset
    # after it is used, and count the steps until a jump leaves the list.
    location = 0
    counter = 0
    size = len(cmds)
    while 0 <= location < size:
        cmd = cmds[location]
        # Offsets of three or more shrink after use; smaller ones grow.
        if cmd >= 3:
            cmds[location] -= 1
        else:
            cmds[location] += 1
        location += cmd
        counter += 1
    # The explicit bounds check replaces the old bare except, and the final
    # (exiting) jump is now counted on both the low and the high side.
    print(counter)
if __name__ == "__main__":
text = open("input.txt").read().strip().split("\n")
cmds = [int(cmd) for cmd in text]
run(cmds)
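# Part 1 of the same puzzle is this loop with an unconditional
# `cmds[location] += 1`; only the offset-update rule differs.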
| [
"[email protected]"
] | |
0977ce752af81adc5992e95036e5f5f852fc53ac | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02903/s750261745.py | 420fde7847d4409890218c615f7f27f1bf072248 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | h,w,a,b=map(int,input().split())
# Print b rows that start with an a-wide block of 0s, then h-b rows with the
# complementary pattern (behaviour identical to the original one-liners).
for _ in range(b):
    print("0" * a + "1" * (w - a))
for _ in range(h - b):
    print("1" * a + "0" * (w - a)) | [
"[email protected]"
] | |
b314afaaabc9bbf3ea4b69fe5f6f89638900efc2 | 04d55063219d484f29bf1a351b87e972b374e9a6 | /inversetoon/core/light_estimation/light_estimation.py | f6a43a8535f4fe512287218f1850a235825d8872 | [
"MIT"
] | permissive | tody411/InverseToon | 5530f63d225f91d1c497f3f80f24c4ccf086aa8f | bc5b922cae9bbf99ed1f020c93b1577c4747ff92 | refs/heads/master | 2020-05-18T13:32:25.285723 | 2015-10-06T02:35:15 | 2015-10-06T02:35:15 | 39,255,745 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py |
# -*- coding: utf-8 -*-
## @package inversetoon.core.light_estimation
#
# inversetoon.core.light_estimation utility package.
# @author tody
# @date 2015/09/07
import numpy as np
from inversetoon.np.norm import normalizeVector
from inversetoon.core.light_estimation.light_estimation_common import testToon
def estimateLightDir(input_data):
N_sil = input_data["N_sil"]
I_sil = input_data["I_sil"]
I_positive = I_sil > 0.001
N_sil = N_sil[I_positive]
I_sil = I_sil[I_positive]
NdLs = I_sil
L = estimateLightDirProjection(N_sil, NdLs)
output_data = {"L": L}
# error = np.linalg.norm(np.dot(N_sil, L) - NdLs)
return output_data
def estimateLightDirLstSq(Ns, NdLs):
    # Solve Ns L = NdLs for the light direction L in the least-squares sense.
    b = NdLs
    A = Ns
    L = np.linalg.lstsq(A, b)[0]
    return L
def estimateLightDirProjection(Ns, NdLs):
    # Start from the normal with the brightest observation, then repeatedly
    # nudge L along each normal by the residual (NdL - L.N) and renormalize.
    I_maxID = np.argmax(NdLs)
    L = Ns[I_maxID]
    for _ in range(100):  # range, not xrange, so this also runs on Python 3
        for N, NdL in zip(Ns, NdLs):
            NdL_c = np.dot(L, N)
            L = L - NdL_c * N + NdL * N
            L = normalizeVector(L)
    return L
if __name__ == '__main__':
testToon("LeastSquare", estimateLightDir)
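# Minimal standalone sketch (assumes unit-length silhouette normals; the
# arrays below are illustrative, not part of the package API):
#
#   Ns = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
#   Is = np.array([0.9, 0.2, 0.4])
#   L = estimateLightDir({"N_sil": Ns, "I_sil": Is})["L"]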
| [
"[email protected]"
] | |
050fe8dd60fa24022d363e59407aef735c810440 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/P/protanto/vultus_christi_archive.py | a3494ec9528161e0c530bce04e116f028619d6fe | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,560 | py | from datetime import datetime, timedelta
import scraperwiki
import requests
import lxml.html
from lxml.cssselect import CSSSelector as CSS
import dateutil.parser
import dateutil.tz
TARGET = "http://vultus.stblogs.org/archives.html"
HEADERS = {
'User-agent': 'Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11',
}
ROW_KEY = ['year', 'month', 'slug']
ROW_SCHEMA = ROW_KEY + ['title', 'text', 'author', 'date', 'tags']
EDIT_WINDOW = timedelta(days=10)
sel_item = CSS("div.archive-individual li")
sel_anchor = CSS("a")
sel_asset = CSS("#alpha-inner")
sel_author = CSS("div.asset-header span.byline address.author a")
sel_date = CSS("div.asset-header span.byline abbr.published")
sel_text = CSS("div.asset-content")
sel_tags = CSS("div.asset-footer li.entry-category a")
def scrape(url):
return lxml.html.fromstring(requests.get(url, headers=HEADERS).text)
def Row(**kwargs):
row = dict((field, None) for field in ROW_SCHEMA)
row.update(kwargs)
return row
store = scraperwiki.sqlite.save
parsedate = dateutil.parser.parse
tzlocal = dateutil.tz.tzlocal
# If the scraper has run once successfully, subsequent runs should
# only scrape new pages and pages that are less than ten days old (to
# allow for edits by the author)
historic_latest = scraperwiki.sqlite.get_var('latest')
if historic_latest:
historic_latest = parsedate(historic_latest)
print("Begin scraping archive ten days prior to: %s" % historic_latest.strftime("%Y.%m.%d"))
latest = datetime(year=2000, month=12, day=31, tzinfo=tzlocal())
latest_timestamp = None
# the scraping loop below swallows errors, but the error may have been
# due to a request timeout or similar, so we want to retry those pages
# that don't exist in the database
try:
archive = set(
(d['year'], d['month'], d['slug']) for d in scraperwiki.sqlite.select("year, month, slug FROM pages")
)
except:
archive = set([])
print "PAGE COUNT: %s" % len(archive)
# begin scrape - first the archive index page to get all individual page urls
index = scrape(TARGET)
# go through the list of page links and scrape each one
for li in sel_item(index):
date = li.text.rstrip().rstrip(':').strip()
a = sel_anchor(li)[0]
href = a.get('href')
if href:
year, month, day = map(int, date.split('.'))
slug = href.split('/')[5].partition('.')[0]
if (year, month, slug) in archive and historic_latest:
# don't re-scrape anything outside the ten day edit window
if datetime(year=year, month=month, day=day, tzinfo=tzlocal()) < historic_latest-EDIT_WINDOW:
# you could break here because the list is date-ordered
continue
print("%s - %s - %s" % (date, slug, href))
page = scrape(href)
try:
content = sel_asset(page)[0]
timestamp = sel_date(content)[0].get('title')
date = parsedate(timestamp)
if date > latest:
# there's a new 'latest' timestamp - saved as a variable below
latest = date
latest_timestamp = timestamp
row = Row(year=year, month=month, title=a.text_content(), slug=slug)
row['date'] = date
row['author'] = sel_author(content)[0].text_content()
row['tags'] = ','.join(a.text_content() for a in sel_tags(content))
row['text'] = lxml.html.tostring(sel_text(content)[0])
except Exception, e:
print("Skipping " + href)
print(" ERROR: %s" % e)
continue
#print row
store(unique_keys=ROW_KEY, data=row, table_name="pages")
if latest_timestamp:
scraperwiki.sqlite.save_var('latest', latest_timestamp)
from datetime import datetime, timedelta
import scraperwiki
import requests
import lxml.html
from lxml.cssselect import CSSSelector as CSS
import dateutil.parser
import dateutil.tz
TARGET = "http://vultus.stblogs.org/archives.html"
HEADERS = {
'User-agent': 'Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11',
}
ROW_KEY = ['year', 'month', 'slug']
ROW_SCHEMA = ROW_KEY + ['title', 'text', 'author', 'date', 'tags']
EDIT_WINDOW = timedelta(days=10)
sel_item = CSS("div.archive-individual li")
sel_anchor = CSS("a")
sel_asset = CSS("#alpha-inner")
sel_author = CSS("div.asset-header span.byline address.author a")
sel_date = CSS("div.asset-header span.byline abbr.published")
sel_text = CSS("div.asset-content")
sel_tags = CSS("div.asset-footer li.entry-category a")
def scrape(url):
return lxml.html.fromstring(requests.get(url, headers=HEADERS).text)
def Row(**kwargs):
row = dict((field, None) for field in ROW_SCHEMA)
row.update(kwargs)
return row
store = scraperwiki.sqlite.save
parsedate = dateutil.parser.parse
tzlocal = dateutil.tz.tzlocal
# If the scraper has run once successfully, subsequent runs should
# only scrape new pages and pages that are less than ten days old (to
# allow for edits by the author)
historic_latest = scraperwiki.sqlite.get_var('latest')
if historic_latest:
historic_latest = parsedate(historic_latest)
print("Begin scraping archive ten days prior to: %s" % historic_latest.strftime("%Y.%m.%d"))
latest = datetime(year=2000, month=12, day=31, tzinfo=tzlocal())
latest_timestamp = None
# the scraping loop below swallows errors, but the error may have been
# due to a request timeout or similar, so we want to retry those pages
# that don't exist in the database
try:
archive = set(
(d['year'], d['month'], d['slug']) for d in scraperwiki.sqlite.select("year, month, slug FROM pages")
)
except:
archive = set([])
print "PAGE COUNT: %s" % len(archive)
# begin scrape - first the archive index page to get all individual page urls
index = scrape(TARGET)
# go through the list of page links and scrape each one
for li in sel_item(index):
date = li.text.rstrip().rstrip(':').strip()
a = sel_anchor(li)[0]
href = a.get('href')
if href:
year, month, day = map(int, date.split('.'))
slug = href.split('/')[5].partition('.')[0]
if (year, month, slug) in archive and historic_latest:
# don't re-scrape anything outside the ten day edit window
if datetime(year=year, month=month, day=day, tzinfo=tzlocal()) < historic_latest-EDIT_WINDOW:
# you could break here because the list is date-ordered
continue
print("%s - %s - %s" % (date, slug, href))
page = scrape(href)
try:
content = sel_asset(page)[0]
timestamp = sel_date(content)[0].get('title')
date = parsedate(timestamp)
if date > latest:
# there's a new 'latest' timestamp - saved as a variable below
latest = date
latest_timestamp = timestamp
row = Row(year=year, month=month, title=a.text_content(), slug=slug)
row['date'] = date
row['author'] = sel_author(content)[0].text_content()
row['tags'] = ','.join(a.text_content() for a in sel_tags(content))
row['text'] = lxml.html.tostring(sel_text(content)[0])
except Exception, e:
print("Skipping " + href)
print(" ERROR: %s" % e)
continue
#print row
store(unique_keys=ROW_KEY, data=row, table_name="pages")
if latest_timestamp:
scraperwiki.sqlite.save_var('latest', latest_timestamp)
| [
"[email protected]"
] | |
a60e1c26c1bea3cb0d32e6cad14184af90210d17 | 92f6008ff6a3199111f9cd6d26ef9102dcc2c5c3 | /Problems/Jackie's Savings/tests.py | 44a6f5b4df88f82de7dd04ed4be188fc14ccbccb | [] | no_license | TonyNewbie/-SmartCalculator | 52c8f5f074e6c72d7b335cf33d7f4f6cab2da34e | e3779b74d83b5db62101f29a73a350b308e982f9 | refs/heads/master | 2022-07-04T02:22:12.044689 | 2020-05-10T08:49:30 | 2020-05-10T08:49:30 | 262,726,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from test_helper import check_samples
if __name__ == '__main__':
check_samples(samples=[["5 7 4","1168.44"]]) | [
"[email protected]"
] | |
50bcbf5cec7db78090ff9b0a03360ba55829d65d | cb65ef874d2427a1edcb132cda05e5ce2dc1aae4 | /modpy/proxy/examples/example_kriging.py | 8b2f0eec48e6e00edcf4d69f0493b005e6b0ba69 | [
"MIT"
] | permissive | FrederikLehn/modpy | 1395c27029f5fbfae2388cbd500b28e67a3cdb9e | 19ab18547e06e93fabfbd7f7b2f0f07ff0e70db3 | refs/heads/main | 2023-07-14T11:58:26.379687 | 2021-07-30T11:04:19 | 2021-07-30T11:04:19 | 390,731,060 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,160 | py | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import MaxNLocator
from numpy.random import Generator, PCG64
from modpy.proxy._kriging import SimpleKrigingModel, OrdinaryKrigingModel, UniversalKrigingModel,\
_maximum_likelihood_objective, exponential_correlation, gaussian_correlation, matern32_correlation,\
matern52_correlation, power_exponential_correlation
from modpy.illustration.illustration_util import PROXY_PATH
from modpy.plot.plot_util import default_color, cm_parula, subplot_layout, add_point, set_font_sizes
from modpy.optimize import nlprog, cma_es
from modpy.proxy._kriging import _ensure_matrix
def _plot_ML_obj_1D():
x, z = np.array([
[-5.01, 1.06], [-4.90, 0.92], [-4.82, 0.35], [-4.69, 0.49], [-4.56, 0.52],
[-4.52, 0.12], [-4.39, 0.47], [-4.32, -0.19], [-4.19, 0.08], [-4.11, -0.19],
[-4.00, -0.03], [-3.89, -0.03], [-3.78, -0.05], [-3.67, 0.10], [-3.59, 0.44],
[-3.50, 0.66], [-3.39, -0.12], [-3.28, 0.45], [-3.20, 0.14], [-3.07, -0.28],
[-3.01, -0.46], [-2.90, -0.32], [-2.77, -1.58], [-2.69, -1.44], [-2.60, -1.51],
[-2.49, -1.50], [-2.41, -2.04], [-2.28, -1.57], [-2.19, -1.25], [-2.10, -1.50],
[-2.00, -1.42], [-1.91, -1.10], [-1.80, -0.58], [-1.67, -1.08], [-1.61, -0.79],
[-1.50, -1.00], [-1.37, -0.04], [-1.30, -0.54], [-1.19, -0.15], [-1.06, -0.18],
[-0.98, -0.25], [-0.87, -1.20], [-0.78, -0.49], [-0.68, -0.83], [-0.57, -0.15],
[-0.50, 0.00], [-0.38, -1.10], [-0.29, -0.32], [-0.18, -0.60], [-0.09, -0.49],
[0.03, -0.50], [0.09, -0.02], [0.20, -0.47], [0.31, -0.11], [0.41, -0.28],
[0.53, 0.40], [0.61, 0.11], [0.70, 0.32], [0.94, 0.42], [1.02, 0.57],
[1.13, 0.82], [1.24, 1.18], [1.30, 0.86], [1.43, 1.11], [1.50, 0.74],
[1.63, 0.75], [1.74, 1.15], [1.80, 0.76], [1.93, 0.68], [2.03, 0.03],
[2.12, 0.31], [2.23, -0.14], [2.31, -0.88], [2.40, -1.25], [2.50, -1.62],
[2.63, -1.37], [2.72, -0.99], [2.80, -1.92], [2.83, -1.94], [2.91, -1.32],
[3.00, -1.69], [3.13, -1.84], [3.21, -2.05], [3.30, -1.69], [3.41, -0.53],
[3.52, -0.55], [3.63, -0.92], [3.72, -0.76], [3.80, -0.41], [3.91, 0.12],
[4.04, 0.25], [4.13, 0.16], [4.24, 0.26], [4.32, 0.62], [4.44, 1.69],
[4.52, 1.11], [4.65, 0.36], [4.74, 0.79], [4.84, 0.87], [4.93, 1.01],
[5.02, 0.55]
]).T
x = np.atleast_2d(x).T
n = z.size
m = 1
correlations = (exponential_correlation, gaussian_correlation, matern32_correlation,
matern52_correlation, power_exponential_correlation)
methods = ('Exponential', 'Gaussian', 'Matérn $\\nu=3/2$', 'Matérn $\\nu=5/2$', 'Power-Exponential')
# ordinary kriging
def f(x_):
return np.ones((x_.shape[0], 1))
bounds = tuple([(1e-5, None) for _ in range(m)])
par0 = (np.array([0.6665]), np.array([0.0087]), np.array([0.1635]), np.array([0.1253]), np.array([0.1235]))
ng = 100
theta = np.linspace(1e-5, 1., ng)
r, c = subplot_layout(len(correlations))
fig = plt.figure(figsize=(20, 11))
# plot data
ax = fig.add_subplot(r, c, 1)
ax.scatter(x, z, s=20, c='k')
ax.set_xlabel('$x$')
ax.set_ylabel('$Z(x)$')
ax.set_title('Data')
ax.grid(True)
# plot objective functions
for i, (corr, method) in enumerate(zip(correlations, methods)):
obj = _maximum_likelihood_objective('ordinary', x, z, f, corr)
vals = np.zeros((ng,))
for j in range(ng):
print('j = {}'.format(j))
vals[j] = obj(np.atleast_1d(theta[j]))
res = cma_es(obj, par0[i], bounds=bounds, sigma0=0.02, tol=1e-3, seed=1234)
print('Optimization: {}, theta: {}'.format(res.success, res.x))
ax = fig.add_subplot(r, c, i + 2)
ax.plot(theta, vals, c=default_color(0), label='Obj($\\theta$)', zorder=1)
ax.scatter(res.x, obj(res.x), c='r', label='Optimum', zorder=2)
ax.set_xlim([0., None])
ax.set_xlabel('$\\theta$')
ax.set_ylabel('$Obj(\\theta)$')
ax.grid(True)
ax.legend()
ax.set_title(method)
fig.savefig(PROXY_PATH + 'kriging_ML_obj_1D.png')
def _plot_ML_obj_2D():
x = np.array([[1., 1.],
[0.5, 0.3],
[0.1, 0.1],
[0.8, 0.3],
[0.25, 0.75]])
z = np.array([1., 10., 12., 5., 7.])
n = z.size
m = 2
correlations = (exponential_correlation, gaussian_correlation, matern32_correlation,
matern52_correlation, power_exponential_correlation)
methods = ('Exponential', 'Gaussian', 'Matérn $\\nu=3/2$', 'Matérn $\\nu=5/2$', 'Power-Exponential ($p=2$)')
# ordinary kriging
def f(x_):
return np.ones((x_.shape[0], 1))
bounds = tuple([(1e-5, None) for _ in range(m)])
# run optimization. TODO: change to MMO optimizer
par0 = np.array([0.5, 1.])
ng = 100
theta1 = np.linspace(1e-5, 3., ng)
theta2 = np.linspace(1e-5, 3., ng)
X, Y = np.meshgrid(theta1, theta2)
S = np.vstack((X.flatten(), Y.flatten())).T
r, c = subplot_layout(len(correlations))
fig = plt.figure(figsize=(20, 11))
# plot data
ax = fig.add_subplot(r, c, 1, projection='3d')
ax.scatter(x[:, 0], x[:, 1], z, c='r')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$Z(x)$')
ax.set_title('Data')
ax.view_init(elev=40., azim=-110)
# plot objective functions
for i, (corr, method) in enumerate(zip(correlations, methods)):
obj = _maximum_likelihood_objective('ordinary', x, z, f, corr)
vals = np.zeros((ng ** 2,))
for j in range(ng ** 2):
vals[j] = obj(S[j])
Z = np.reshape(vals, (ng, ng))
res = cma_es(obj, par0, bounds=bounds, sigma0=0.1, seed=1234)
print('Optimization: {}, theta: {}'.format(res.success, res.x))
ax = fig.add_subplot(r, c, i + 2, projection='3d')
ax.plot_surface(X, Y, Z, cmap=cm_parula, edgecolors='k', lw=0.2, alpha=0.7, zorder=1)
add_point(ax, res.x[0], res.x[1], res.f, fc='r', ec='r', radius=0.08)
ax.set_xlabel('$\\theta_1$')
ax.set_ylabel('$\\theta_2$')
ax.set_zlabel('$Obj(\\theta)$')
ax.zaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_title(method)
ax.view_init(elev=50., azim=-120)
#plt.show()
fig.savefig(PROXY_PATH + 'kriging_ML_obj_2D.png')
def _plot_covariance_functions():
correlations = (exponential_correlation, gaussian_correlation, matern32_correlation,
matern52_correlation, power_exponential_correlation)
names = ('Exponential', 'Gaussian', 'Matérn $\\nu=3/2$', 'Matérn $\\nu=5/2$', 'Power-Exponential ($p=2$)')
nb = 1000
x_min = 0.
x_max = 1.8
x0 = np.linspace(x_min, x_max, nb)
theta = 0.5
sigma = 4.
fig = plt.figure()
ax = fig.gca()
for i, (corr, name) in enumerate(zip(correlations, names)):
c = sigma ** 2. * corr(x0, theta)
ax.plot(x0, c, c=default_color(i), label=name)
#ax.set_xlim([x.min() * 0.9, x.max() * 1.1])
#ax.set_ylim([z.min() * 1.5, z.max() * 1.5])
ax.set_xlim([x_min, x_max])
ax.set_ylim([0., sigma ** 2. + 1.])
ax.set_xlabel(r'$h$')
ax.set_ylabel(r'$C(h)$')
ax.grid(True)
ax.legend()
fig.savefig(PROXY_PATH + 'covariance_functions.png')
def _plot_kernel_impact_1D():
x = np.array([1., 1.8, 3.8, 4.7, 5.7, 7.3, 7.8, 9.2, 11.1, 12.9])
z = np.array([0.9, 2.2, -2.3, -4.9, -3.7, 7.3, 7.3, 0.8, -10.8, 7.1])
methods = ['exp', 'gaussian', 'matern32', 'matern52', 'pow-exp']
names = ('Exponential', 'Gaussian', 'Matérn $\\nu=3/2$', 'Matérn $\\nu=5/2$', 'Power-Exponential $p=1.5$')
theta0 = np.array([1.1])
nb = 200
ns = 10
x_min = 0.
x_max = 15.
x0 = np.linspace(x_min, x_max, nb)
h = np.linspace(0., 5., nb)
r, c = subplot_layout(len(methods))
fig = plt.figure(figsize=(20, 11))
ax1 = fig.add_subplot(r, c, 1)
fill_col = np.array([0.7, 0.7, 0.7, 0.3])
for i, (method, name) in enumerate(zip(methods, names)):
if method == 'pow-exp':
args = (1.5,)
else:
args = ()
model = OrdinaryKrigingModel(x, z, method, seed=1324, args=args)
model.initialize_ML(theta0)
model.define_weights(x0)
z0 = model.mean()
v0 = model.variance()
conf = 1.96 * np.sqrt(v0)
samples = model.sample(ns)
# add covariance function to ax1
ax1.plot(h, model.sigma ** 2. * model.corr(h, model.theta), c=default_color(i), label='{} ($\\theta=${})'.format(name, *np.round(model.theta, 2)))
ax = fig.add_subplot(r, c, i + 2)
ax.scatter(x, z, s=20, c='k', zorder=4)
ax.plot(x0, z0, c=default_color(i), label='$Z(x_0)$', zorder=3)
ax.fill_between(x0, z0 - conf, z0 + conf, color=fill_col, label='Conf.', zorder=1)
ax.plot(x0, samples[0, :], c='gray', label='Samples', lw=0.5)
for j in range(1, ns):
ax.plot(x0, samples[j, :], c='gray', lw=0.5, zorder=2)
ax.set_xlim([x_min, x_max])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$Z(x)$')
ax.set_title(name)
ax.grid(True)
ax.legend()
ax1.set_xlim([0., 5.])
ax1.set_ylim([0., None])
ax1.set_xlabel(r'$h$')
ax1.set_ylabel(r'$C(h)$')
ax1.set_title('Kernels')
ax1.grid(True)
ax1.legend()
fig.savefig(PROXY_PATH + 'kernel_impact_1D.png')
def _plot_prior_posterior_process():
x = np.array([1., 1.8, 3.8, 4.7, 5.7, 7.3, 7.8, 9.2, 11.1, 12.9])
z = np.array([0.9, 2.2, -2.3, -4.9, -3.7, 7.3, 7.3, 0.8, -10.8, 7.1])
theta0 = np.array([1.1])
nb = 1000
ns = 15
x_min = 0.
x_max = 15.
x0 = np.linspace(x_min, x_max, nb)
fig = plt.figure(figsize=(24, 10))
fill_col = np.array([0.7, 0.7, 0.7, 0.5])
model = OrdinaryKrigingModel(x, z, 'gaussian', seed=1324)
model.initialize_ML(theta0)
model.define_weights(x0)
# plot prior process
m = np.mean(z)
s = model.sigma
conf = 1.96 * s
samples = model.sample(ns, posterior=False)
ax = fig.add_subplot(1, 2, 1)
ax.plot([x_min, x_max], [m, m], c=default_color(0), label='$Z(x_0)$', zorder=3)
ax.fill_between([x_min, x_max], m - conf, m + conf, color=fill_col, label='Conf.', zorder=1)
ax.plot(x0, samples[0, :], c='gray', label='Samples', lw=1., zorder=2)
for j in range(1, ns):
ax.plot(x0, samples[j, :], c='gray', lw=1., zorder=2)
ax.set_xlim([x_min, x_max])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$Z(x)$')
ax.set_title('Prior Probability')
ax.grid(True)
ax.legend()
set_font_sizes(ax, 16)
# plot posterior process
z0 = model.mean()
v0 = model.variance()
conf = 1.96 * np.sqrt(v0)
samples = model.sample(ns)
ax = fig.add_subplot(1, 2, 2)
ax.scatter(x, z, s=20, c='k', zorder=4)
ax.plot(x0, z0, c=default_color(0), label='$Z(x_0)$', zorder=3)
ax.fill_between(x0, z0 - conf, z0 + conf, color=fill_col, label='Conf.', zorder=1)
ax.plot(x0, samples[0, :], c='gray', label='Samples', lw=1., zorder=2)
for j in range(1, ns):
ax.plot(x0, samples[j, :], c='gray', lw=1., zorder=2)
ax.set_xlim([x_min, x_max])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$Z(x)$')
ax.set_title('Posterior Probability')
ax.grid(True)
ax.legend(loc='upper left')
set_font_sizes(ax, 16)
fig.savefig(PROXY_PATH + 'gaussian_process_prior_posterior.png')
def _plot_kriging_types_1D():
gen = Generator(PCG64(1234))
def f(x_):
x2d = _ensure_matrix(x_)
n = x2d.shape[0]
return np.hstack((np.ones((n, 1)), x2d, x2d ** 2.))
x = np.array([1., 1.8, 3.8, 4.7, 5.7, 7.3, 7.8, 9.2, 11.1, 12.9])
beta_true = np.array([40., -20., 1.5])
sigma_true = 4.
z = f(x) @ beta_true + gen.normal(0., sigma_true, x.size)
theta0 = np.array([0.9])
nb = 600
x0 = np.linspace(0., 15., nb)
fig = plt.figure()
ax = fig.gca()
# simple kriging
simple = SimpleKrigingModel(x, z, 'gaussian', seed=1324)
simple.initialize_ML(theta0)
simple.define_weights(x0)
z_sim = simple.mean()
v_sim = simple.variance()
conf_sim = 1.96 * np.sqrt(v_sim)
print('SK ($\\beta$={}, $\\theta$={}, $\sigma$={})'\
.format(np.round(simple.beta, 1), *np.round(simple.theta, 2), np.round(simple.sigma, 1)))
ax.plot(x0, z_sim, c=default_color(0), label='Simple')
ax.plot(x0, z_sim - conf_sim, c=default_color(0), lw=0.5, ls='--')
ax.plot(x0, z_sim + conf_sim, c=default_color(0), lw=0.5, ls='--')
# ordinary kriging
ordinary = OrdinaryKrigingModel(x, z, 'gaussian', seed=1324)
ordinary.initialize_ML(theta0)
ordinary.define_weights(x0)
z_ord = ordinary.mean()
v_ord = ordinary.variance()
conf_ord = 1.96 * np.sqrt(v_ord)
print('OK ($\\beta$={}, $\\theta$={}, $\sigma$={})' \
.format(*np.round(ordinary.beta, 1), *np.round(ordinary.theta, 2), np.round(ordinary.sigma, 1)))
ax.plot(x0, z_ord, c=default_color(1), label='Ordinary')
ax.plot(x0, z_ord - conf_ord, c=default_color(1), lw=0.5, ls='--')
ax.plot(x0, z_ord + conf_ord, c=default_color(1), lw=0.5, ls='--')
# universal kriging
universal = UniversalKrigingModel(x, z, f, 'gaussian', seed=1324)
universal.initialize_ML(theta0)
universal.define_weights(x0)
z_uni = universal.mean()
v_uni = universal.variance()
conf_uni = 1.96 * np.sqrt(v_uni)
print('UK ($\\beta$={}, $\\theta$={}, $\sigma$={})' \
.format(np.round(universal.beta, 1), *np.round(universal.theta, 3), np.round(universal.sigma, 1)))
ax.plot(x0, z_uni, c=default_color(2), label='Universal')
ax.plot(x0, z_uni - conf_uni, c=default_color(2), lw=0.5, ls='--')
ax.plot(x0, z_uni + conf_uni, c=default_color(2), lw=0.5, ls='--')
# data
ax.scatter(x, z, s=20, c='k', zorder=2.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$Z(x)$')
ax.grid(True)
ax.legend()
fig.savefig(PROXY_PATH + 'kriging_types_1D.png')
def _plot_kriging_types_2D():
def f(x_):
if x_.ndim == 1:
x_ = np.reshape(x_, (1, x_.size))
n, _ = x_.shape
return np.hstack((np.ones((n, 1)), np.atleast_2d(np.prod(x_, axis=1)).T))
x = np.array([[1., 1.],
[0.5, 0.3],
[0.1, 0.1],
[0.8, 0.3],
[0.25, 0.75]])
z = np.array([1., 10., 12., 5., 7.])
theta0 = (np.array([0.4136, 0.5822]), np.array([0.4363, 0.5614]), np.array([0.0381, 0.0812]))
bounds = ((1e-3, 1.), (1e-3, 1.))
ng = 100
L = np.sqrt(2.)
x1 = np.linspace(-.5 * L, .5 * L, ng) + .5 * L
x2 = np.linspace(-.5 * L, .5 * L, ng) + .5 * L
X1, X2 = np.meshgrid(x1, x2)
x0 = np.vstack((X1.flatten(), X2.flatten())).T
r, c = 2, 2
fig = plt.figure(figsize=(20, 14))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
ax = fig.add_subplot(r, c, 1, projection='3d')
ax.scatter(x[:, 0], x[:, 1], z, c='r')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$Z(x)$')
ax.set_title('Data')
ax.view_init(elev=50, azim=120)
models = (SimpleKrigingModel(x, z, 'gaussian', seed=1324),
OrdinaryKrigingModel(x, z, 'gaussian', seed=1324),
UniversalKrigingModel(x, z, f, 'gaussian', seed=1324))
names = ('Simple', 'Ordinary', 'Universal')
for i, (model, name) in enumerate(zip(models, names)):
model.initialize_ML(theta0[i], bounds=bounds)
model.define_weights(x0)
Z = model.mean()
Z = np.reshape(Z, (ng, ng))
print(name, ' theta={}'.format(model.theta))
ax = fig.add_subplot(r, c, i + 2, projection='3d')
ax.plot_surface(X1, X2, Z, cmap=cm_parula, edgecolors='k', lw=0.2, alpha=0.7)
#ax.scatter(x[:, 0], x[:, 1], z, s=40, c='r', zorder=2.5)
for j in range(5):
add_point(ax, x[j, 0], x[j, 1], z[j], fc='r', ec='r', radius=0.03)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_zlabel('$Z(x)$')
ax.set_title(name)
set_font_sizes(ax, 12)
ax.view_init(elev=50, azim=120)
fig.savefig(PROXY_PATH + 'kriging_types_2D.png', bbox_inches='tight')
if __name__ == '__main__':
#_plot_ML_obj_1D()
#_plot_ML_obj_2D()
#_plot_covariance_functions()
#_plot_kernel_impact_1D()
#_plot_kriging_types_1D()
_plot_kriging_types_2D()
#_plot_prior_posterior_process()
| [
"[email protected]"
] | |
80a203056070914fde0b633807198a6f49b4d484 | 67c3c2a310a4d129a45739ca6351052f36f6d5f4 | /venv/lib/python3.7/keyword.py | 10891398d4fb79df3c09b078b871f9a58f1528fd | [] | no_license | cyobero/django-blog | a743203bdaf1d8ae9e6bd47c6e7b33a213a7abfd | 307335c84a0fa9eba6d3f69172a47580144cc066 | refs/heads/master | 2022-12-09T20:25:51.396813 | 2020-03-10T14:52:26 | 2020-03-10T14:52:26 | 245,950,344 | 0 | 0 | null | 2022-11-22T05:22:50 | 2020-03-09T05:20:31 | Python | UTF-8 | Python | false | false | 48 | py | /home/cyobero/anaconda3/lib/python3.7/keyword.py | [
"[email protected]"
] | |
266635f8db60fe89592c32c152f3b53a7832a8f6 | 2b3f859e7bde80f19e0f823b5e6e73ddb44cb3fe | /tests/migrators/test_group.py | a08a14e4fd2b0715b54d3f317b4ef5b0ed0de7b1 | [] | no_license | stormpath/stormpath-migrate | 4a0d4bc949da7df416529820bdcd76f590a8fe89 | ee43dbddda29a0b85c9901ea1e678660ef3bce36 | refs/heads/master | 2021-01-22T10:01:37.030105 | 2017-03-06T19:17:13 | 2017-03-06T19:17:13 | 43,518,049 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py | """Tests for our GroupMigrator class."""
from os import environ
from unittest import TestCase
from uuid import uuid4
from stormpath.client import Client
from migrate.migrators import DirectoryMigrator, GroupMigrator
from migrate.utils import sanitize
# Necessary environment variables.
SRC_CLIENT_ID = environ['SRC_CLIENT_ID']
SRC_CLIENT_SECRET = environ['SRC_CLIENT_SECRET']
DST_CLIENT_ID = environ['DST_CLIENT_ID']
DST_CLIENT_SECRET = environ['DST_CLIENT_SECRET']
class GroupMigratorTest(TestCase):
def setUp(self):
self.src = Client(id=SRC_CLIENT_ID, secret=SRC_CLIENT_SECRET)
self.dst = Client(id=DST_CLIENT_ID, secret=DST_CLIENT_SECRET)
self.dir = self.src.directories.create({
'description': uuid4().hex,
'name': uuid4().hex,
})
self.group = self.dir.groups.create({
'description': uuid4().hex,
'name': uuid4().hex,
'status': 'DISABLED',
'custom_data': {'hi': 'there'},
})
migrator = DirectoryMigrator(destination_client=self.dst, source_directory=self.dir)
self.dst_dir = migrator.migrate()
def tearDown(self):
self.dir.delete()
self.dst_dir.delete()
def test_copy_group(self):
migrator = GroupMigrator(destination_directory=self.dst_dir, source_group=self.group)
migrator.destination_group = migrator.get_destination_group()
migrator.get_destination_group()
copied_group = migrator.copy_group()
self.assertTrue(copied_group)
self.assertEqual(copied_group.description, self.group.description)
self.assertEqual(copied_group.name, self.group.name)
self.assertEqual(copied_group.status, self.group.status)
def test_copy_custom_data(self):
migrator = GroupMigrator(destination_directory=self.dst_dir, source_group=self.group)
migrator.destination_group = migrator.get_destination_group()
copied_group = migrator.copy_group()
copied_custom_data = migrator.copy_custom_data()
self.assertEqual(copied_custom_data['hi'], 'there')
def test_migrate(self):
custom_data = self.group.custom_data
migrator = GroupMigrator(destination_directory=self.dst_dir, source_group=self.group)
copied_group = migrator.migrate()
copied_custom_data = copied_group.custom_data
self.assertEqual(copied_group.description, self.group.description)
self.assertEqual(copied_group.name, self.group.name)
self.assertEqual(copied_group.status, self.group.status)
self.assertEqual(copied_custom_data['hi'], self.group.custom_data['hi'])
| [
"[email protected]"
] | |
86ac7f9bcafb82d17cf1e1940c1920b2fc108579 | 0b414a080c9853997bfba016c7f66e5f11d80a14 | /cj_env/lib/python3.6/site-packages/pysnmp/proto/rfc1901.py | 8b7dec076254fc3b3a6d3e95de8296083ff5f1db | [] | no_license | alkhor/Cable_Journal | 2bd4bf00210f78c08fcc5508c13833b5e8aa3c46 | e64fb1bfcc4d1b7844b2e0a10653264d58039259 | refs/heads/master | 2021-01-22T19:09:33.562313 | 2018-04-15T19:42:16 | 2018-04-15T19:42:16 | 100,772,711 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pysnmp.sf.net/license.html
#
from pyasn1.type import univ, namedtype, namedval
from pysnmp.proto import rfc1905
version = univ.Integer(namedValues=namedval.NamedValues(('version-2c', 1)))
class Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', version),
namedtype.NamedType('community', univ.OctetString()),
namedtype.NamedType('data', rfc1905.PDUs())
)
| [
"[email protected]"
] | |
32a95aa4742fabf685bb335ad6a630b7ee37a801 | 975b2d421d3661e6770b601929d5f11d981d8985 | /msgraph/generated/models/data_subject_type.py | 8af8f9227c69c2dc7db37be9eb47d8afd69c4c11 | [
"MIT"
] | permissive | microsoftgraph/msgraph-sdk-python | a7c551b85daadeebf76ec4ae12668664ea639b42 | 27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949 | refs/heads/main | 2023-09-03T21:45:27.989672 | 2023-08-31T06:22:18 | 2023-08-31T06:22:18 | 534,665,999 | 135 | 18 | MIT | 2023-09-14T11:04:11 | 2022-09-09T14:00:17 | Python | UTF-8 | Python | false | false | 357 | py | from enum import Enum
class DataSubjectType(str, Enum):
Customer = "customer",
CurrentEmployee = "currentEmployee",
FormerEmployee = "formerEmployee",
ProspectiveEmployee = "prospectiveEmployee",
Student = "student",
Teacher = "teacher",
Faculty = "faculty",
Other = "other",
UnknownFutureValue = "unknownFutureValue",
| [
"[email protected]"
] | |
34405ad77c78331d0d5982215aff3c9b75139970 | 54ddb3f38cd09ac25213a7eb8743376fe778fee8 | /topic_08_functions/examples/6_visibility_global.py | ec56199933ecb0c82714c47ca80123e9fa749b43 | [] | no_license | ryndovaira/leveluppythonlevel1_300321 | dbfd4ee41485870097ee490f652751776ccbd7ab | 0877226e6fdb8945531775c42193a90ddb9c8a8b | refs/heads/master | 2023-06-06T07:44:15.157913 | 2021-06-18T11:53:35 | 2021-06-18T11:53:35 | 376,595,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | x = 0
def outer():
x = 1
def inner():
global x
x = 2
print("inner:", x)
inner()
print("outer:", x)
outer()
print("global:", x)
# inner: 2
# outer: 1
# global: 2 | [
"[email protected]"
] | |
66925b569b205b36e465e85da39a6c1ca0b998ab | cccf8da8d41ae2c14f5f4313c1edcf03a27956bb | /python/python2latex/writeLTXtextrm.py | 25de7160851c9ab2eed55ced5886e34a78ecaea7 | [] | no_license | LucaDiStasio/transpilers | e8f8ac4d99be3b42a050148ca8fbc5d025b83290 | c55d4f5240083ffd512f76cd1d39cff1016909b8 | refs/heads/master | 2021-01-12T01:57:00.540331 | 2017-11-01T13:59:55 | 2017-11-01T13:59:55 | 78,448,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,283 | py | # Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXtextrm(filepath=None,args=None,options=None,*args,**kwargs):
varargin = writeLTXtextrm.varargin
nargin = writeLTXtextrm.nargin
##
#==============================================================================
# Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
# Author: Luca Di Stasio <[email protected]>
# <[email protected]>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
# Neither the name of the Universite de Lorraine or Lulea tekniska universitet
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
# DESCRIPTION
#
# A function to create a Latex file.
# Sets roman font family. SeeText Formatting.#
##
fileId=fopen(filepath,'a')
fprintf(fileId,'\\n')
line='\\textrm'
if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
line=strcat(line,'[',options,']')
if logical_not(isempty(args)):
line=strcat(line,'{')
for i in arange(1,length(args)).reshape(-1):
dims=size(args)
if dims[1] == 1 and dims[2] == 1:
line=strcat(line,args[i])
else:
if dims[1] > 1 and dims[2] == 1:
try:
line=strcat(line,args[i][1])
finally:
pass
else:
if dims[1] == 1 and dims[2] > 1:
try:
line=strcat(line,args[1][i])
finally:
pass
else:
line=strcat(line,args[i])
line=strcat(line,'}')
fprintf(fileId,strcat(line,'\\n'))
fclose(fileId)
return | [
"[email protected]"
] | |
e3d03d32e51e516989a28022f99a1ecc931a3bb1 | cb0e7d6493b23e870aa625eb362384a10f5ee657 | /solutions/python3/0199.py | 55bf7f22754e31867cd312d03f303f6cb6b10e0b | [] | no_license | sweetpand/LeetCode-1 | 0acfa603af254a3350d457803449a91322f2d1a7 | 65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94 | refs/heads/master | 2022-11-14T07:01:42.502172 | 2020-07-12T12:25:56 | 2020-07-12T12:25:56 | 279,088,171 | 1 | 0 | null | 2020-07-12T15:03:20 | 2020-07-12T15:03:19 | null | UTF-8 | Python | false | false | 384 | py | class Solution:
def rightSideView(self, root: TreeNode) -> List[int]:
def dfs(root: TreeNode, depth: int) -> None:
if not root:
return
if depth == len(ans):
ans.append(root.val)
dfs(root.right, depth + 1)
dfs(root.left, depth + 1)
ans = []
dfs(root, 0)
return ans
| [
"[email protected]"
] | |
36aa53c07596f0697adcc2a9facc301ec460cbac | c9a6b59b7164b6e402105c802b91d6c2695cec21 | /blog/templatetags/isliked.py | 36f43c2eee37d031b1f40c6b9824525f9ca61c65 | [] | no_license | harunurkst/amar-campus | 9b9d10e216c9e85b2c78e0c6720310084d389187 | fe0474274fb7419ef70f9463842260af7d6dea2f | refs/heads/master | 2021-01-18T20:24:40.343852 | 2017-04-25T06:07:26 | 2017-04-25T06:07:26 | 86,963,328 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | from django import template
register = template.Library()
@register.filter(name="is_liked")
def is_liked(user):
post_id = request.POST.get('id', None)
post = get_object_or_404(Post, pk=post_id)
if post.likes.filter(id=user.id).exists():
post.likes.remove(user)
is_liked = False
else:
post.likes.add(user)
is_liked = True
return is_liked
| [
"[email protected]"
] | |
7a52bd2dc5f9b0f200ec0b550facbc8715441923 | 043ca446cbee59c1926de7473869ef34748e5b2b | /_2019/windmill.py | c133ab63983de261a692cf2dd68f69183eeb79b6 | [
"MIT"
] | permissive | soubam/videos | 785d04907d6955456797f989893b3f811c7f721e | 04a00e521808e2b733903bd1c91435a29ba2c678 | refs/heads/master | 2023-08-28T15:38:08.284733 | 2021-11-11T23:22:35 | 2021-11-11T23:22:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120,504 | py | from manim_imports_ext import *
import json
class IntroduceIMO(Scene):
CONFIG = {
"num_countries": 130,
"use_real_images": True,
# "use_real_images": False,
"include_labels": False,
"camera_config": {"background_color": GREY_E},
"random_seed": 6,
"year": 2019,
"n_flag_rows": 10,
"reorganize_students": True,
}
def construct(self):
self.add_title()
self.show_flags()
self.show_students()
self.move_title()
self.isolate_usa()
def add_title(self):
title = TexText(
"International ", "Mathematical ", "Olympiad",
)
title.scale(1.25)
logo = ImageMobject("imo_logo")
logo.set_height(1)
group = Group(logo, title)
group.arrange(RIGHT)
group.to_edge(UP, buff=MED_SMALL_BUFF)
self.add(title, logo)
self.title = title
self.logo = logo
def show_flags(self):
flags = self.get_flags()
flags.set_height(6)
flags.to_edge(DOWN)
random_flags = Group(*flags)
random_flags.shuffle()
self.play(
LaggedStartMap(
FadeInFromDown, random_flags,
run_time=2,
lag_ratio=0.03,
)
)
self.remove(random_flags)
self.add(flags)
self.wait()
self.flags = flags
def show_students(self):
flags = self.flags
student_groups = VGroup()
all_students = VGroup()
for flag in flags:
group = self.get_students(flag)
student_groups.add(group)
for student in group:
student.preimage = VectorizedPoint()
student.preimage.move_to(flag)
all_students.add(student)
all_students.shuffle()
student_groups.generate_target()
student_groups.target.arrange_in_grid(
n_rows=self.n_flag_rows,
buff=SMALL_BUFF,
)
# student_groups.target[-9:].align_to(student_groups.target[0], LEFT)
student_groups.target.match_height(flags)
student_groups.target.match_y(flags)
student_groups.target.to_edge(RIGHT, buff=0.25)
self.play(LaggedStart(
*[
ReplacementTransform(
student.preimage, student
)
for student in all_students
],
run_time=2,
lag_ratio=0.2,
))
self.wait()
if self.reorganize_students:
self.play(
MoveToTarget(student_groups),
flags.space_out_submobjects, 0.75,
flags.to_edge, LEFT, MED_SMALL_BUFF,
)
self.wait()
self.student_groups = student_groups
def move_title(self):
title = self.title
logo = self.logo
new_title = TexText("IMO")
new_title.match_height(title)
logo.generate_target()
group = Group(logo.target, new_title)
group.arrange(RIGHT, buff=SMALL_BUFF)
group.match_y(title)
group.match_x(self.student_groups, UP)
title.generate_target()
for word, letter in zip(title.target, new_title[0]):
for nl in word:
nl.move_to(letter)
word.set_opacity(0)
word[0].set_opacity(1)
word[0].become(letter)
self.play(
MoveToTarget(title),
MoveToTarget(logo),
)
self.wait()
def isolate_usa(self):
flags = self.flags
student_groups = self.student_groups
us_flag = flags[0]
random_flags = Group(*flags[1:])
random_flags.shuffle()
old_height = us_flag.get_height()
us_flag.label.set_width(0.8 * us_flag.get_width())
us_flag.label.next_to(
us_flag, DOWN,
buff=0.2 * us_flag.get_height(),
)
us_flag.label.set_opacity(0)
us_flag.add(us_flag.label)
us_flag.generate_target()
us_flag.target.scale(1 / old_height)
us_flag.target.to_corner(UL)
us_flag.target[1].set_opacity(1)
self.remove(us_flag)
self.play(
LaggedStart(
*[
FadeOut(flag, DOWN)
for flag in random_flags
],
lag_ratio=0.05,
run_time=1.5
),
MoveToTarget(us_flag),
student_groups[1:].fade, 0.9,
)
self.wait()
#
def get_students(self, flag):
dots = VGroup(*[Dot() for x in range(6)])
dots.arrange_in_grid(n_cols=2, buff=SMALL_BUFF)
dots.match_height(flag)
dots.next_to(flag, RIGHT, SMALL_BUFF)
dots[flag.n_students:].set_opacity(0)
if isinstance(flag, ImageMobject):
rgba = random.choice(random.choice(flag.pixel_array))
if np.all(rgba < 100):
rgba = interpolate(rgba, 256 * np.ones(len(rgba)), 0.5)
color = rgba_to_color(rgba / 256)
else:
color = random_bright_color()
dots.set_color(color)
dots.set_stroke(WHITE, 1, background=True)
return dots
def get_flags(self):
year = self.year
file = "{}_imo_countries.json".format(year)
with open(os.path.join("assets", file)) as fp:
countries_with_counts = json.load(fp)
with open(os.path.join("assets", "country_codes.json")) as fp:
country_codes = json.load(fp)
country_to_code2 = dict([
(country.lower(), code2.lower())
for country, code2, code3 in country_codes
])
country_to_code3 = dict([
(country.lower(), code3.lower())
for country, code2, code3 in country_codes
])
images = Group()
for country, count in countries_with_counts:
country = country.lower()
alt_names = [
("united states of america", "united states"),
("people's republic of china", "china"),
("macau", "macao"),
("syria", "syrian arab republic"),
("north macedonia", "macedonia, the former yugoslav republic of"),
("tanzania", "united republic of tanzania"),
("vietnam", "viet nam"),
("ivory coast", "cote d'ivoire")
]
for n1, n2 in alt_names:
if country == n1:
country = n2
if country not in country_to_code2:
print("Can't find {}".format(country))
continue
short_code = country_to_code2[country]
try:
image = ImageMobject(os.path.join("flags", short_code))
image.set_width(1)
label = VGroup(*[
TexText(l)
for l in country_to_code3[country].upper()
])
label.arrange(RIGHT, buff=0.05, aligned_edge=DOWN)
label.set_height(0.25)
if not self.use_real_images:
rect = SurroundingRectangle(image, buff=0)
rect.set_stroke(WHITE, 1)
image = rect
image.label = label
image.n_students = count
images.add(image)
except OSError:
print("Failed on {}".format(country))
n_rows = self.n_flag_rows
images.arrange_in_grid(
n_rows=n_rows,
buff=1.25,
)
images[-(len(images) % n_rows):].align_to(images[0], LEFT)
sf = 1.7
images.stretch(sf, 0)
for i, image in enumerate(images):
image.set_height(1)
image.stretch(1 / sf, 0)
image.label.next_to(image, DOWN, SMALL_BUFF)
if self.include_labels:
image.add(image.label)
images.set_width(FRAME_WIDTH - 1)
if images.get_height() > FRAME_HEIGHT - 1:
images.set_height(FRAME_HEIGHT - 1)
images.center()
return images
class ShowTinyTao(IntroduceIMO):
CONFIG = {
"reorganize_students": False,
}
def construct(self):
self.force_skipping()
self.add_title()
self.show_flags()
self.show_students()
self.revert_to_original_skipping_status()
image = ImageMobject("TerryTaoIMO")
label = TexText("Terence Tao at 12")
label.match_width(image)
label.next_to(image, DOWN, SMALL_BUFF)
image.add(label)
ausie = self.flags[17]
image.replace(ausie)
image.set_opacity(0)
self.play(image.set_opacity, 1)
self.play(
image.set_height, 5,
image.to_corner, DR, {"buff": MED_SMALL_BUFF},
)
self.wait()
self.play(FadeOut(image))
class FootnoteToIMOIntro(Scene):
def construct(self):
words = TexText("$^*$Based on data from 2019 test")
self.play(FadeIn(words, UP))
self.wait()
class ShowTest(Scene):
def construct(self):
self.introduce_test()
def introduce_test(self):
test = self.get_test()
test.generate_target()
test.target.to_edge(UP)
# Time label
time_labels = VGroup(
TexText("Day 1", ": 4.5 hours"),
TexText("Day 2", ": 4.5 hours"),
)
time_labels.scale(1.5)
day_labels = VGroup()
hour_labels = VGroup()
for label, page in zip(time_labels, test.target):
label.next_to(page, DOWN)
label[0].save_state()
label[0].next_to(page, DOWN)
label[1][1:].set_color(YELLOW)
day_labels.add(label[0])
hour_labels.add(label[1])
# Problem descriptions
problem_rects = self.get_problem_rects(test.target[0])
proof_words = VGroup()
for rect in problem_rects:
word = TexText("Proof")
word.scale(2)
word.next_to(rect, RIGHT, buff=3)
word.set_color(BLUE)
proof_words.add(word)
proof_words.space_out_submobjects(2)
proof_arrows = VGroup()
for rect, word in zip(problem_rects, proof_words):
arrow = Arrow(word.get_left(), rect.get_right())
arrow.match_color(word)
proof_arrows.add(arrow)
scores = VGroup()
for word in proof_words:
score = VGroup(Integer(0), Tex("/"), Integer(7))
score.arrange(RIGHT, buff=SMALL_BUFF)
score.scale(2)
score.move_to(word)
score.to_edge(RIGHT)
scores.add(score)
score[0].add_updater(lambda m: m.set_color(
interpolate_color(RED, GREEN, m.get_value() / 7)
))
# Introduce test
self.play(
LaggedStart(
FadeIn(test[0], 2 * RIGHT),
FadeIn(test[1], 2 * LEFT),
lag_ratio=0.3,
)
)
self.wait()
self.play(
MoveToTarget(test, lag_ratio=0.2),
FadeIn(day_labels, UP, lag_ratio=0.2),
)
self.wait()
self.play(
*map(Restore, day_labels),
FadeIn(hour_labels, LEFT),
)
self.wait()
# Discuss problems
self.play(
FadeOut(test[1]),
FadeOut(time_labels[1]),
LaggedStartMap(ShowCreation, problem_rects),
run_time=1,
)
self.play(
LaggedStart(*[
FadeIn(word, LEFT)
for word in proof_words
]),
LaggedStart(*[
GrowArrow(arrow)
for arrow in proof_arrows
]),
)
self.wait()
self.play(FadeIn(scores))
self.play(
LaggedStart(*[
ChangeDecimalToValue(score[0], 7)
for score in scores
], lag_ratio=0.2, rate_func=rush_into)
)
self.wait()
self.scores = scores
self.proof_arrows = proof_arrows
self.proof_words = proof_words
self.problem_rects = problem_rects
self.test = test
self.time_labels = time_labels
def get_test(self):
group = Group(
ImageMobject("imo_2011_p1"),
ImageMobject("imo_2011_p2"),
)
group.set_height(6)
group.arrange(RIGHT, buff=LARGE_BUFF)
for page in group:
rect = SurroundingRectangle(page, buff=0.01)
rect.set_stroke(WHITE, 1)
page.add(rect)
# page.pixel_array[:, :, :3] = 255 - page.pixel_array[:, :, :3]
return group
def get_problem_rects(self, page):
pw = page.get_width()
rects = VGroup(*[Rectangle() for x in range(3)])
rects.set_stroke(width=2)
rects.set_color_by_gradient([BLUE_E, BLUE_C, BLUE_D])
rects.set_width(pw * 0.75)
for factor, rect in zip([0.095, 0.16, 0.1], rects):
rect.set_height(factor * pw, stretch=True)
rects.arrange(DOWN, buff=0.08)
rects.move_to(page)
rects.shift(0.09 * pw * DOWN)
return rects
class USProcessAlt(IntroduceIMO):
CONFIG = {
}
def construct(self):
self.add_flag_and_label()
self.show_tests()
self.show_imo()
def add_flag_and_label(self):
flag = ImageMobject("flags/us")
flag.set_height(1)
flag.to_corner(UL)
label = VGroup(*map(TexText, "USA"))
label.arrange(RIGHT, buff=0.05, aligned_edge=DOWN)
label.set_width(0.8 * flag.get_width())
label.next_to(flag, DOWN, buff=0.2 * flag.get_height())
self.add(flag, label)
self.flag = flag
def show_tests(self):
tests = VGroup(
self.get_test(
["American ", "Mathematics ", "Contest"],
n_questions=25,
time_string="75 minutes",
hours=1.25,
n_students=250000,
),
self.get_test(
["American ", "Invitational ", "Math ", "Exam"],
n_questions=15,
time_string="3 hours",
hours=3,
n_students=12000,
),
self.get_test(
["U", "S", "A ", "Math ", "Olympiad"],
n_questions=6,
time_string="$2 \\times 4.5$ hours",
hours=4.5,
n_students=500,
),
self.get_test(
["Mathematical ", "Olympiad ", "Program"],
n_questions=None,
time_string="3 weeks",
hours=None,
n_students=60
)
)
amc, aime, usamo, mop = tests
arrows = VGroup()
amc.to_corner(UR)
top_point = amc.get_top()
last_arrow = VectorizedPoint()
last_arrow.to_corner(DL)
next_anims = []
self.force_skipping()
for test in tests:
test.move_to(top_point, UP)
test.shift_onto_screen()
self.play(
Write(test.name),
*next_anims,
run_time=1,
)
self.wait()
self.animate_name_abbreviation(test)
self.wait()
if isinstance(test.nq_label[0], Integer):
int_mob = test.nq_label[0]
n = int_mob.get_value()
int_mob.set_value(0)
self.play(
ChangeDecimalToValue(int_mob, n),
FadeIn(test.nq_label[1:])
)
else:
self.play(FadeIn(test.nq_label))
self.play(
FadeIn(test.t_label)
)
self.wait()
test.generate_target()
test.target.scale(0.575)
test.target.next_to(last_arrow, RIGHT, buff=SMALL_BUFF)
test.target.shift_onto_screen()
next_anims = [
MoveToTarget(test),
GrowArrow(last_arrow),
]
last_arrow = Vector(0.5 * RIGHT)
last_arrow.set_color(WHITE)
last_arrow.next_to(test.target, RIGHT, SMALL_BUFF)
arrows.add(last_arrow)
self.play(*next_anims)
self.revert_to_original_skipping_status()
self.play(
LaggedStartMap(
FadeInFrom, tests,
lambda m: (m, LEFT),
),
LaggedStartMap(
GrowArrow, arrows[:-1]
),
lag_ratio=0.4,
)
self.wait()
self.tests = tests
def show_imo(self):
tests = self.tests
logo = ImageMobject("imo_logo")
logo.set_height(1)
name = TexText("IMO")
name.scale(2)
group = Group(logo, name)
group.arrange(RIGHT)
group.to_corner(UR)
group.shift(2 * LEFT)
students = VGroup(*[
PiCreature()
for x in range(6)
])
students.arrange_in_grid(n_cols=3, buff=LARGE_BUFF)
students.set_height(2)
students.next_to(group, DOWN)
colors = it.cycle([RED, GREY_B, BLUE])
for student, color in zip(students, colors):
student.set_color(color)
student.save_state()
student.move_to(tests[-1])
student.fade(1)
self.play(
FadeInFromDown(group),
LaggedStartMap(
Restore, students,
run_time=3,
lag_ratio=0.3,
)
)
self.play(
LaggedStart(*[
ApplyMethod(student.change, "hooray")
for student in students
])
)
for x in range(3):
self.play(Blink(random.choice(students)))
self.wait()
#
def animate_name_abbreviation(self, test):
name = test.name
short_name = test.short_name
short_name.move_to(name, LEFT)
name.generate_target()
for p1, p2 in zip(name.target, short_name):
for letter in p1:
letter.move_to(p2[0])
letter.set_opacity(0)
p1[0].set_opacity(1)
self.add(test.rect, test.name, test.ns_label)
self.play(
FadeIn(test.rect),
MoveToTarget(name),
FadeIn(test.ns_label),
)
test.remove(name)
test.add(short_name)
self.remove(name)
self.add(short_name)
def get_test(self, name_parts, n_questions, time_string, hours, n_students):
T_COLOR = GREEN_B
Q_COLOR = YELLOW
name = TexText(*name_parts)
short_name = TexText(*[np[0] for np in name_parts])
if n_questions:
nq_label = VGroup(
Integer(n_questions),
TexText("questions")
)
nq_label.arrange(RIGHT)
else:
nq_label = TexText("Lots of training")
nq_label.set_color(Q_COLOR)
if time_string:
t_label = TexText(time_string)
t_label.set_color(T_COLOR)
else:
t_label = Integer(0).set_opacity(0)
clock = Clock()
clock.hour_hand.set_opacity(0)
clock.minute_hand.set_opacity(0)
clock.set_stroke(WHITE, 2)
if hours:
sector = Sector(
start_angle=TAU / 4,
angle=-TAU * (hours / 12),
outer_radius=clock.get_width() / 2,
arc_center=clock.get_center()
)
sector.set_fill(T_COLOR, 0.5)
sector.set_stroke(T_COLOR, 2)
clock.add(sector)
if hours == 4.5:
plus = Tex("+").scale(2)
plus.next_to(clock, RIGHT)
clock_copy = clock.copy()
clock_copy.next_to(plus, RIGHT)
clock.add(plus, clock_copy)
else:
clock.set_opacity(0)
clock.set_height(1)
clock.next_to(t_label, RIGHT, buff=MED_LARGE_BUFF)
t_label.add(clock)
ns_label = TexText("$\\sim${:,} students".format(n_students))
result = VGroup(
name,
nq_label,
t_label,
ns_label,
)
result.arrange(
DOWN,
buff=MED_LARGE_BUFF,
aligned_edge=LEFT,
)
rect = SurroundingRectangle(result, buff=MED_SMALL_BUFF)
rect.set_width(
result[1:].get_width() + MED_LARGE_BUFF,
about_edge=LEFT,
stretch=True,
)
rect.set_stroke(WHITE, 2)
rect.set_fill(BLACK, 1)
result.add_to_back(rect)
result.name = name
result.short_name = short_name
result.nq_label = nq_label
result.t_label = t_label
result.ns_label = ns_label
result.rect = rect
result.clock = clock
return result
class Describe2011IMO(IntroduceIMO):
CONFIG = {
"year": 2011,
"use_real_images": True,
"n_flag_rows": 10,
"student_data": [
[1, "Lisa Sauermann", "de", [7, 7, 7, 7, 7, 7]],
[2, "Jeck Lim", "sg", [7, 5, 7, 7, 7, 7]],
[3, "Lin Chen", "cn", [7, 3, 7, 7, 7, 7]],
[4, "Jun Jie Joseph Kuan", "sg", [7, 7, 7, 7, 7, 1]],
[4, "David Yang", "us", [7, 7, 7, 7, 7, 1]],
[6, "Jie Jun Ang", "sg", [7, 7, 7, 7, 7, 0]],
[6, "Kensuke Yoshida", "jp", [7, 6, 7, 7, 7, 1]],
[6, "Raul Sarmiento", "pe", [7, 7, 7, 7, 7, 0]],
[6, "Nipun Pitimanaaree", "th", [7, 7, 7, 7, 7, 0]],
],
}
def construct(self):
self.add_title()
self.add_flags_and_students()
self.comment_on_primality()
self.show_top_three_scorers()
def add_title(self):
year = Tex("2011")
logo = ImageMobject("imo_logo")
imo = TexText("IMO")
group = Group(year, logo, imo)
group.scale(1.25)
logo.set_height(1)
group.arrange(RIGHT)
group.to_corner(UR, buff=MED_SMALL_BUFF)
group.shift(LEFT)
self.add(group)
self.play(FadeIn(year, RIGHT))
self.title = group
def add_flags_and_students(self):
flags = self.get_flags()
flags.space_out_submobjects(0.8)
sf = 0.8
flags.stretch(sf, 0)
for flag in flags:
flag.stretch(1 / sf, 0)
flags.set_height(5)
flags.to_corner(DL)
student_groups = VGroup(*[
self.get_students(flag)
for flag in flags
])
student_groups.arrange_in_grid(
n_rows=self.n_flag_rows,
buff=SMALL_BUFF,
)
student_groups[-1].align_to(student_groups, LEFT)
student_groups.set_height(6)
student_groups.next_to(self.title, DOWN)
flags.align_to(student_groups, UP)
all_students = VGroup(*it.chain(*[
[
student
for student in group
if student.get_fill_opacity() > 0
]
for group in student_groups
]))
# Counters
student_counter = VGroup(
Integer(0),
TexText("Participants"),
)
student_counter.set = all_students
student_counter.next_to(self.title, LEFT, MED_LARGE_BUFF)
student_counter.right_edge = student_counter.get_right()
def update_counter(counter):
counter[0].set_value(len(counter.set))
counter.arrange(RIGHT)
counter[0].align_to(counter[1][0][0], DOWN)
counter.move_to(counter.right_edge, RIGHT)
student_counter.add_updater(update_counter)
flag_counter = VGroup(
Integer(0),
TexText("Countries")
)
flag_counter.set = flags
flag_counter.next_to(student_counter, LEFT, buff=0.75)
flag_counter.align_to(student_counter[0], DOWN)
flag_counter.right_edge = flag_counter.get_right()
flag_counter.add_updater(update_counter)
self.add(student_counter)
self.play(
ShowIncreasingSubsets(all_students),
run_time=3,
)
self.wait()
self.add(flag_counter)
self.play(
ShowIncreasingSubsets(flags),
run_time=3,
)
self.wait()
self.student_counter = student_counter
self.flag_counter = flag_counter
self.all_students = all_students
self.student_groups = student_groups
self.flags = flags
def comment_on_primality(self):
full_rect = FullScreenFadeRectangle(opacity=0.9)
numbers = VGroup(
self.title[0],
self.student_counter[0],
self.flag_counter[0],
)
lines = VGroup(*[
Line().match_width(number).next_to(number, DOWN, SMALL_BUFF)
for number in numbers
])
lines.set_stroke(TEAL, 2)
randy = Randolph()
randy.to_corner(DL)
randy.look_at(numbers)
words = VGroup(*[
TexText("Prime").next_to(line, DOWN)
for line in reversed(lines)
])
words.match_color(lines)
self.add(full_rect, numbers)
self.play(
FadeIn(full_rect),
randy.change, "sassy",
VFadeIn(randy),
)
self.play(
ShowCreation(lines),
randy.change, "pondering",
)
self.play(Blink(randy))
self.play(
randy.change, "thinking",
LaggedStart(*[
FadeIn(word, UP)
for word in words
], run_time=3, lag_ratio=0.5)
)
self.play(Blink(randy))
self.play(
FadeOut(randy),
FadeOut(words),
)
self.play(FadeOut(full_rect), FadeOut(lines))
def show_top_three_scorers(self):
student_groups = self.student_groups
all_students = self.all_students
flags = self.flags
student_counter = self.student_counter
flag_counter = self.flag_counter
student = student_groups[10][0]
flag = flags[10]
students_to_fade = VGroup(*filter(
lambda s: s is not student,
all_students
))
flags_to_fade = Group(*filter(
lambda s: s is not flag,
flags
))
grid = self.get_score_grid()
grid.shift(3 * DOWN)
title_row = grid.rows[0]
top_row = grid.rows[1]
self.play(
LaggedStartMap(FadeOutAndShiftDown, students_to_fade),
LaggedStartMap(FadeOutAndShiftDown, flags_to_fade),
ChangeDecimalToValue(student_counter[0], 1),
FadeOut(flag_counter),
run_time=2
)
        student_counter[1][0][-1].fade(1)  # Hide the final "s": "Participants" -> "Participant"
self.play(
Write(top_row[0]),
ReplacementTransform(student, top_row[1][1]),
flag.replace, top_row[1][0],
)
self.remove(flag)
self.add(top_row[1])
self.play(
LaggedStartMap(FadeIn, title_row[2:]),
LaggedStartMap(FadeIn, top_row[2:]),
)
self.wait()
self.play(
LaggedStart(*[
FadeIn(row, UP)
for row in grid.rows[2:4]
]),
LaggedStart(*[
ShowCreation(line)
for line in grid.h_lines[:2]
]),
lag_ratio=0.5,
)
self.wait()
self.play(
ShowCreationThenFadeAround(
Group(title_row[3], grid.rows[3][3]),
)
)
self.wait()
student_counter.clear_updaters()
self.play(
FadeOut(self.title, UP),
FadeOut(student_counter, UP),
grid.rows[:4].shift, 3 * UP,
grid.h_lines[:3].shift, 3 * UP,
)
remaining_rows = grid.rows[4:]
remaining_lines = grid.h_lines[3:]
Group(remaining_rows, remaining_lines).shift(3 * UP)
self.play(
LaggedStartMap(
FadeInFrom, remaining_rows,
lambda m: (m, UP),
),
LaggedStartMap(ShowCreation, remaining_lines),
lag_ratio=0.3,
run_time=2,
)
self.wait()
def get_score_grid(self):
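        """
        Build one row per entry of student_data: rank, flag + name,
        then the six problem scores, colored from red (0) up to
        green (7), with thin horizontal separators between rows.
        """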
data = self.student_data
ranks = VGroup(*[
Integer(row[0])
for row in data
])
# Combine students with flags
students = VGroup(*[
TexText(row[1])
for row in data
])
flags = Group(*[
ImageMobject("flags/{}.png".format(row[2])).set_height(0.3)
for row in data
])
students = Group(*[
Group(flag.next_to(student, LEFT, buff=0.2), student)
for flag, student in zip(flags, students)
])
score_rows = VGroup(*[
VGroup(*map(Integer, row[3]))
for row in data
])
colors = color_gradient([RED, YELLOW, GREEN], 8)
for score_row in score_rows:
for score in score_row:
score.set_color(colors[score.get_value()])
titles = VGroup(*[
VectorizedPoint(),
VectorizedPoint(),
*[
TexText("P{}".format(i))
for i in range(1, 7)
]
])
titles.arrange(RIGHT, buff=MED_LARGE_BUFF)
titles[2:].shift(students.get_width() * RIGHT)
rows = Group(titles, *[
Group(rank, student, *score_row)
for rank, flag, student, score_row in zip(
ranks, flags, students, score_rows
)
])
rows.arrange(DOWN)
rows.to_edge(UP)
for row in rows:
for i, e1, e2 in zip(it.count(), titles, row):
if i < 2:
e2.align_to(e1, LEFT)
else:
e2.match_x(e1)
ranks.next_to(students, LEFT)
h_lines = VGroup()
for r1, r2 in zip(rows[1:], rows[2:]):
line = Line()
line.set_stroke(WHITE, 0.5)
line.match_width(r2)
line.move_to(midpoint(r1.get_bottom(), r2.get_top()))
line.align_to(r2, LEFT)
h_lines.add(line)
grid = Group(rows, h_lines)
grid.rows = rows
grid.h_lines = h_lines
return grid
class AskWhatsOnTest(ShowTest, MovingCameraScene):
def construct(self):
self.force_skipping()
self.introduce_test()
self.revert_to_original_skipping_status()
self.ask_about_questions()
def ask_about_questions(self):
scores = self.scores
arrows = self.proof_arrows
proof_words = self.proof_words
question = TexText("What kind \\\\ of problems?")
question.scale(1.5)
question.move_to(proof_words, LEFT)
research = TexText("Research-lite")
research.scale(1.5)
research.move_to(question, LEFT)
research.shift(MED_SMALL_BUFF * RIGHT)
research.set_color(BLUE)
arrows.generate_target()
for arrow in arrows.target:
end = arrow.get_end()
start = arrow.get_start()
arrow.put_start_and_end_on(
interpolate(question.get_left(), start, 0.1),
end
)
self.play(
FadeOut(scores),
FadeOut(proof_words),
MoveToTarget(arrows),
Write(question),
)
self.wait()
self.play(
FadeIn(research, DOWN),
question.shift, 2 * UP,
)
self.wait()
# Experience
randy = Randolph(height=2)
randy.move_to(research.get_corner(UL), DL)
randy.shift(SMALL_BUFF * RIGHT)
clock = Clock()
clock.set_height(1)
clock.next_to(randy, UR)
self.play(
FadeOut(question),
FadeIn(randy),
FadeInFromDown(clock),
)
self.play(
randy.change, "pondering",
ClockPassesTime(clock, run_time=5, hours_passed=5),
)
self.play(
ClockPassesTime(clock, run_time=2, hours_passed=2),
VFadeOut(clock),
Blink(randy),
VFadeOut(randy),
LaggedStartMap(
FadeOut,
VGroup(
research,
*arrows,
*self.problem_rects,
self.time_labels[0]
)
),
)
# Second part
big_rect = FullScreenFadeRectangle()
lil_rect = self.problem_rects[1].copy()
lil_rect.reverse_points()
big_rect.append_vectorized_mobject(lil_rect)
frame = self.camera_frame
frame.generate_target()
frame.target.scale(0.35)
frame.target.move_to(lil_rect)
self.play(
FadeInFromDown(self.test[1]),
)
self.wait()
self.play(
FadeIn(big_rect),
MoveToTarget(frame, run_time=6),
)
self.wait()
class ReadQuestions(Scene):
def construct(self):
background = ImageMobject("AskWhatsOnTest_final_image")
background.set_height(FRAME_HEIGHT)
self.add(background)
lines = SVGMobject("imo_2011_2_underline-01")
lines.set_width(FRAME_WIDTH - 1)
lines.move_to(0.1 * DOWN)
lines.set_stroke(TEAL, 3)
clump_sizes = [1, 2, 3, 2, 1, 2]
partial_sums = list(np.cumsum(clump_sizes))
clumps = VGroup(*[
lines[i:j]
for i, j in zip(
[0] + partial_sums,
partial_sums,
)
])
faders = []
for clump in clumps:
rects = VGroup()
for line in clump:
rect = Rectangle()
rect.set_stroke(width=0)
rect.set_fill(TEAL, 0.25)
rect.set_width(line.get_width() + SMALL_BUFF)
rect.set_height(0.35, stretch=True)
rect.move_to(line, DOWN)
rects.add(rect)
self.play(
ShowCreation(clump, run_time=2),
FadeIn(rects),
*faders,
)
self.wait()
faders = [
FadeOut(clump),
FadeOut(rects),
]
self.play(*faders)
self.wait()
# Windmill scenes
class WindmillScene(Scene):
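    """
    Shared machinery for animating IMO 2011 Problem 2: a line (the
    "windmill") rotates clockwise about a pivot point of a finite set S
    until it meets another point of S, which then becomes the new
    pivot, and the process continues indefinitely.
    """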
CONFIG = {
"dot_config": {
"fill_color": GREY_B,
"radius": 0.05,
"background_stroke_width": 2,
"background_stroke_color": BLACK,
},
"windmill_style": {
"stroke_color": RED,
"stroke_width": 2,
"background_stroke_width": 3,
"background_stroke_color": BLACK,
},
"windmill_length": 2 * FRAME_WIDTH,
"windmill_rotation_speed": 0.25,
# "windmill_rotation_speed": 0.5,
# "hit_sound": "pen_click.wav",
"hit_sound": "pen_click.wav",
"leave_shadows": False,
}
def get_random_point_set(self, n_points=11, width=6, height=6):
return np.array([
[
-width / 2 + np.random.random() * width,
-height / 2 + np.random.random() * height,
0
]
            for _ in range(n_points)
])
def get_dots(self, points):
return VGroup(*[
Dot(point, **self.dot_config)
for point in points
])
def get_windmill(self, points, pivot=None, angle=TAU / 4):
line = Line(LEFT, RIGHT)
line.set_length(self.windmill_length)
line.set_angle(angle)
line.set_style(**self.windmill_style)
line.point_set = points
if pivot is not None:
line.pivot = pivot
else:
line.pivot = points[0]
line.rot_speed = self.windmill_rotation_speed
line.add_updater(lambda l: l.move_to(l.pivot))
return line
    def get_pivot_dot(self, windmill, color=YELLOW):
        pivot_dot = Dot(color=color)
        pivot_dot.add_updater(lambda d: d.move_to(windmill.pivot))
        return pivot_dot
def start_leaving_shadows(self):
self.leave_shadows = True
self.add(self.get_windmill_shadows())
def get_windmill_shadows(self):
if not hasattr(self, "windmill_shadows"):
self.windmill_shadows = VGroup()
return self.windmill_shadows
def next_pivot_and_angle(self, windmill):
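        """
        Among all non-pivot points, find the one the line will strike
        first as it turns clockwise. Angles are taken mod PI because
        the line extends in both directions. Returns the pair
        (next_pivot_point, clockwise_angle_until_contact).
        """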
curr_angle = windmill.get_angle()
pivot = windmill.pivot
non_pivots = list(filter(
lambda p: not np.all(p == pivot),
windmill.point_set
))
angles = np.array([
-(angle_of_vector(point - pivot) - curr_angle) % PI
for point in non_pivots
])
# Edge case for 2 points
tiny_indices = angles < 1e-6
if np.all(tiny_indices):
return non_pivots[0], PI
angles[tiny_indices] = np.inf
index = np.argmin(angles)
return non_pivots[index], angles[index]
def rotate_to_next_pivot(self, windmill, max_time=None, added_anims=None):
"""
Returns animations to play following the contact, and total run time
"""
new_pivot, angle = self.next_pivot_and_angle(windmill)
change_pivot_at_end = True
if added_anims is None:
added_anims = []
run_time = angle / windmill.rot_speed
if max_time is not None and run_time > max_time:
ratio = max_time / run_time
rate_func = (lambda t: ratio * t)
run_time = max_time
change_pivot_at_end = False
else:
rate_func = linear
for anim in added_anims:
if anim.run_time > run_time:
anim.run_time = run_time
self.play(
Rotate(
windmill,
-angle,
rate_func=rate_func,
run_time=run_time,
),
*added_anims,
)
if change_pivot_at_end:
self.handle_pivot_change(windmill, new_pivot)
# Return animations to play
return [self.get_hit_flash(new_pivot)], run_time
def handle_pivot_change(self, windmill, new_pivot):
windmill.pivot = new_pivot
self.add_sound(self.hit_sound)
if self.leave_shadows:
new_shadow = windmill.copy()
new_shadow.fade(0.5)
new_shadow.set_stroke(width=1)
new_shadow.clear_updaters()
shadows = self.get_windmill_shadows()
shadows.add(new_shadow)
    def let_windmill_run(self, windmill, time):
        anims_from_last_hit = []
        while time > 0:
            anims_from_last_hit, last_run_time = self.rotate_to_next_pivot(
                windmill,
                max_time=time,
                added_anims=anims_from_last_hit,
            )
            time -= last_run_time
def add_dot_color_updater(self, dots, windmill, **kwargs):
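        """
        Attach updaters so every dot is recolored each frame according
        to which side of the windmill line it lies on.
        """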
for dot in dots:
dot.add_updater(lambda d: self.update_dot_color(
d, windmill, **kwargs
))
    def update_dot_color(self, dot, windmill, color1=BLUE, color2=GREY_BROWN):
        # The sign of the dot product with a perpendicular vector tells
        # which side of the line the point lies on.
        perp = rotate_vector(windmill.get_vector(), TAU / 4)
        dot_product = np.dot(perp, dot.get_center() - windmill.pivot)
        if dot_product > 0:
            dot.set_color(color1)
        else:
            dot.set_color(color2)
        dot.set_stroke(WHITE, width=2, background=True)
def get_hit_flash(self, point):
flash = Flash(
point,
line_length=0.1,
flash_radius=0.2,
run_time=0.5,
remover=True,
)
flash_mob = flash.mobject
for submob in flash_mob:
submob.reverse_points()
return Uncreate(
flash.mobject,
run_time=0.25,
lag_ratio=0,
)
def get_pivot_counters(self, windmill, counter_height=0.25, buff=0.2, color=WHITE):
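        """
        Return an Integer above each point in the windmill's point set,
        counting how many times that point has served as the pivot.
        """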
points = windmill.point_set
counters = VGroup()
for point in points:
counter = Integer(0)
counter.set_color(color)
counter.set_height(counter_height)
counter.next_to(point, UP, buff=buff)
counter.point = point
counter.windmill = windmill
counter.is_pivot = False
counter.add_updater(self.update_counter)
counters.add(counter)
return counters
def update_counter(self, counter):
dist = get_norm(counter.point - counter.windmill.pivot)
counter.will_be_pivot = (dist < 1e-6)
if (not counter.is_pivot) and counter.will_be_pivot:
counter.increment_value()
counter.is_pivot = counter.will_be_pivot
def get_orientation_arrows(self, windmill, n_tips=20):
tips = VGroup(*[
ArrowTip(start_angle=0)
for x in range(n_tips)
])
tips.stretch(0.75, 1)
tips.scale(0.5)
tips.rotate(windmill.get_angle())
tips.match_color(windmill)
tips.set_stroke(BLACK, 1, background=True)
for tip, a in zip(tips, np.linspace(0, 1, n_tips)):
tip.shift(
windmill.point_from_proportion(a) - tip.get_points()[0]
)
return tips
def get_left_right_colorings(self, windmill, opacity=0.3):
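        """
        Two half-plane regions glued to the windmill line: one side is
        shaded blue, the other grey-brown, updating as the line moves.
        """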
rects = VGroup(VMobject(), VMobject())
rects.const_opacity = opacity
def update_regions(rects):
p0, p1 = windmill.get_start_and_end()
v = p1 - p0
vl = rotate_vector(v, 90 * DEGREES)
vr = rotate_vector(v, -90 * DEGREES)
p2 = p1 + vl
p3 = p0 + vl
p4 = p1 + vr
p5 = p0 + vr
rects[0].set_points_as_corners([p0, p1, p2, p3])
rects[1].set_points_as_corners([p0, p1, p4, p5])
rects.set_stroke(width=0)
rects[0].set_fill(BLUE, rects.const_opacity)
rects[1].set_fill(GREY_BROWN, rects.const_opacity)
return rects
rects.add_updater(update_regions)
return rects
class IntroduceWindmill(WindmillScene):
CONFIG = {
"final_run_time": 60,
"windmill_rotation_speed": 0.5,
}
def construct(self):
self.add_points()
self.exclude_colinear()
self.add_line()
self.switch_pivots()
self.continue_and_count()
def add_points(self):
points = self.get_random_point_set(8)
points[-1] = midpoint(points[0], points[1])
dots = self.get_dots(points)
dots.set_color(YELLOW)
dots.set_height(3)
braces = VGroup(
Brace(dots, LEFT),
Brace(dots, RIGHT),
)
group = VGroup(dots, braces)
group.set_height(4)
group.center().to_edge(DOWN)
S, eq = S_eq = Tex("\\mathcal{S}", "=")
S_eq.scale(2)
S_eq.next_to(braces, LEFT)
self.play(
FadeIn(S_eq),
FadeIn(braces[0], RIGHT),
FadeIn(braces[1], LEFT),
)
self.play(
LaggedStartMap(FadeInFromLarge, dots)
)
self.wait()
self.play(
S.next_to, dots, LEFT,
{"buff": 2, "aligned_edge": UP},
FadeOut(braces),
FadeOut(eq),
)
self.S_label = S
self.dots = dots
def exclude_colinear(self):
dots = self.dots
line = Line(dots[0].get_center(), dots[1].get_center())
line.scale(1.5)
line.set_stroke(WHITE)
words = TexText("Not allowed!")
words.scale(2)
words.set_color(RED)
words.next_to(line.get_center(), RIGHT)
self.add(line, dots)
self.play(
ShowCreation(line),
FadeIn(words, LEFT),
dots[-1].set_color, RED,
)
self.wait()
self.play(
FadeOut(line),
FadeOut(words),
)
self.play(
FadeOut(
dots[-1], 3 * RIGHT,
path_arc=-PI / 4,
rate_func=running_start,
)
)
dots.remove(dots[-1])
self.wait()
def add_line(self):
dots = self.dots
points = np.array(list(map(Mobject.get_center, dots)))
p0 = points[0]
windmill = self.get_windmill(points, p0, angle=60 * DEGREES)
pivot_dot = self.get_pivot_dot(windmill)
l_label = Tex("\\ell")
l_label.scale(1.5)
p_label = Tex("P")
l_label.next_to(
p0 + 2 * normalize(windmill.get_vector()),
RIGHT,
)
l_label.match_color(windmill)
p_label.next_to(p0, RIGHT)
p_label.match_color(pivot_dot)
arcs = VGroup(*[
Arc(angle=-45 * DEGREES, radius=1.5)
for x in range(2)
])
arcs[1].rotate(PI, about_point=ORIGIN)
for arc in arcs:
arc.add_tip(tip_length=0.2)
arcs.rotate(windmill.get_angle())
arcs.shift(p0)
self.add(windmill, dots)
self.play(
GrowFromCenter(windmill),
FadeIn(l_label, DL),
)
self.wait()
self.play(
TransformFromCopy(pivot_dot, p_label),
GrowFromCenter(pivot_dot),
dots.set_color, WHITE,
)
self.wait()
self.play(*map(ShowCreation, arcs))
self.wait()
# Rotate to next pivot
next_pivot, angle = self.next_pivot_and_angle(windmill)
self.play(
*[
Rotate(
mob, -0.99 * angle,
about_point=p0,
rate_func=linear,
)
for mob in [windmill, arcs, l_label]
],
VFadeOut(l_label),
)
self.add_sound(self.hit_sound)
self.play(
self.get_hit_flash(next_pivot)
)
self.wait()
self.pivot2 = next_pivot
self.pivot_dot = pivot_dot
self.windmill = windmill
self.p_label = p_label
self.arcs = arcs
def switch_pivots(self):
windmill = self.windmill
pivot2 = self.pivot2
p_label = self.p_label
arcs = self.arcs
q_label = Tex("Q")
q_label.set_color(YELLOW)
q_label.next_to(pivot2, DR, buff=SMALL_BUFF)
self.rotate_to_next_pivot(windmill)
self.play(
FadeIn(q_label, LEFT),
FadeOut(p_label),
FadeOut(arcs),
)
self.wait()
flashes, run_time = self.rotate_to_next_pivot(windmill)
self.remove(q_label)
self.add_sound(self.hit_sound)
self.play(*flashes)
self.wait()
self.let_windmill_run(windmill, 10)
def continue_and_count(self):
windmill = self.windmill
pivot_dot = self.pivot_dot
p_label = Tex("P")
p_label.match_color(pivot_dot)
p_label.next_to(pivot_dot, DR, buff=0)
l_label = Tex("\\ell")
l_label.scale(1.5)
l_label.match_color(windmill)
l_label.next_to(
windmill.get_center() + -3 * normalize(windmill.get_vector()),
DR,
buff=SMALL_BUFF,
)
self.play(FadeIn(p_label, UL))
self.play(FadeIn(l_label, LEFT))
self.wait()
self.add(
windmill.copy().fade(0.75),
pivot_dot.copy().fade(0.75),
)
pivot_counters = self.get_pivot_counters(windmill)
self.add(pivot_counters)
windmill.rot_speed *= 2
self.let_windmill_run(windmill, self.final_run_time)
class ContrastToOtherOlympiadProblems(AskWhatsOnTest):
def construct(self):
self.zoom_to_other_questions()
def zoom_to_other_questions(self):
test = self.get_test()
        rects = self.get_all_rects(test)
big_rects = VGroup()
for rect in rects:
big_rect = FullScreenFadeRectangle()
rect.reverse_points()
big_rect.append_vectorized_mobject(rect)
big_rects.add(big_rect)
frame = self.camera_frame
frame.generate_target()
frame.target.scale(0.35)
frame.target.move_to(rects[1])
big_rect = big_rects[1].copy()
self.add(test)
self.play(
FadeIn(big_rect),
MoveToTarget(frame, run_time=3),
)
self.wait()
for i in [2, 0, 3, 5]:
self.play(
frame.move_to, rects[i],
Transform(big_rect, big_rects[i])
)
self.wait()
def get_all_rects(self, test):
rects = self.get_problem_rects(test[0])
new_rects = VGroup(rects[1], rects[0], rects[2]).copy()
new_rects[0].stretch(0.85, 1)
new_rects[1].stretch(0.8, 1)
new_rects[2].stretch(0.8, 1)
new_rects.arrange(DOWN, buff=0.08)
new_rects.move_to(test[1])
new_rects.align_to(rects, UP)
rects.add(*new_rects)
return rects
class WindmillExample30Points(WindmillScene):
CONFIG = {
"n_points": 30,
"random_seed": 0,
"run_time": 60,
"counter_config": {
"counter_height": 0.15,
"buff": 0.1,
},
}
def construct(self):
points = self.get_random_point_set(self.n_points)
points[:, 0] *= 1.5
sorted_points = sorted(list(points), key=lambda p: p[1])
sorted_points[4] += RIGHT
dots = self.get_dots(points)
windmill = self.get_windmill(points, sorted_points[5], angle=PI / 4)
pivot_dot = self.get_pivot_dot(windmill)
# self.add_dot_color_updater(dots, windmill)
self.add(windmill)
self.add(dots)
self.add(pivot_dot)
self.add(self.get_pivot_counters(
windmill, **self.counter_config
))
self.let_windmill_run(windmill, self.run_time)
class WindmillExample15Points(WindmillExample30Points):
CONFIG = {
"n_points": 15,
"run_time": 60,
"random_seed": 2,
"counter_config": {
"counter_height": 0.25,
"buff": 0.1,
},
}
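# Each of the example scenes above can be rendered on its own. With the
# manimlib CLI used by this repository, the invocation would look
# something like the following (file name here is hypothetical):
#
#   manim windmill.py WindmillExample30Points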
class TheQuestion(Scene):
def construct(self):
words = TexText(
"Will each point be hit infinitely many times?"
)
words.set_width(FRAME_WIDTH - 1)
words.to_edge(UP)
self.add(words)
class SpiritOfIMO(PiCreatureScene):
def construct(self):
randy = self.pi_creature
problems = VGroup(*[
TexText("P{})".format(i))
for i in range(1, 7)
])
problems.arrange_in_grid(n_cols=2, buff=LARGE_BUFF)
problems.scale(1.5)
problems[3:].shift(1.5 * RIGHT)
problems.to_corner(UR, buff=LARGE_BUFF)
problems.shift(2 * LEFT)
light_bulbs = VGroup()
lights = VGroup()
for problem in problems:
light_bulb = Lightbulb()
light_bulb.base = light_bulb[:3]
light_bulb.light = light_bulb[3:]
light_bulb.set_height(1)
light_bulb.next_to(problem, RIGHT)
light_bulbs.add(light_bulb)
light = self.get_light(light_bulb.get_center())
lights.add(light)
self.play(
LaggedStartMap(FadeInFromDown, problems)
)
self.play(
LaggedStartMap(
FadeIn, light_bulbs,
run_time=1,
),
LaggedStartMap(
LaggedStartMap, lights,
lambda l: (VFadeInThenOut, l),
run_time=3
),
randy.change, "thinking"
)
self.wait()
self.pi_creature_thinks(
"Oh, I've\\\\seen this...",
target_mode="surprised",
)
self.wait(3)
def get_light(self, point):
radii = np.arange(0, 5, 0.1)
result = VGroup(*[
Annulus(
inner_radius=r1,
outer_radius=r2,
arc_center=point,
fill_opacity=(1 / (r1 + 1)**2),
fill_color=YELLOW,
)
for r1, r2 in zip(radii[1:], radii[2:])
])
return result
# TODO
class HowToPrepareForThis(Scene):
def construct(self):
pass
class HarderThanExpected(TeacherStudentsScene):
def construct(self):
title = TexText("Unusual aspect \\#2")
title.scale(1.5)
title.to_edge(UP)
line = Line(LEFT, RIGHT)
line.match_width(title)
line.next_to(title, DOWN)
words = TexText("Harder than expected")
words.set_color(RED)
words.scale(1.5)
words.next_to(line, DOWN, LARGE_BUFF)
self.play(
FadeInFromDown(title),
ShowCreation(line),
self.teacher.change, "raise_right_hand",
self.get_student_changes("pondering", "confused", "sassy")
)
self.wait()
self.play(
FadeIn(words, UP),
self.get_student_changes(*3 * ["horrified"]),
)
self.wait(3)
class TraditionalDifficulty(ContrastToOtherOlympiadProblems):
def construct(self):
test = self.get_test()
rects = self.get_all_rects(test)
for rect in rects:
rect.reverse_points()
big_rects = VGroup(*[
FullScreenFadeRectangle()
for x in range(3)
])
for br, r1, r2 in zip(big_rects, rects, rects[3:]):
br.append_vectorized_mobject(r1)
br.append_vectorized_mobject(r2)
big_rect = big_rects[0].copy()
p_labels = VGroup()
for i, rect in enumerate(rects):
p_label = TexText("P{}".format(i + 1))
p_label.next_to(rect, LEFT)
p_labels.add(p_label)
arrow = Vector(3 * DOWN)
arrow.next_to(test[0], RIGHT)
arrow.match_y(rects)
harder_words = TexText("Get harder")
harder_words.scale(2)
harder_words.next_to(arrow, RIGHT)
harder_words.set_color(RED)
p_words = VGroup(
TexText("Doable", color=GREEN),
TexText("Challenging", color=YELLOW),
TexText("Brutal", color=RED),
)
p_words.add(*p_words.copy())
for rect, word, label in zip(rects, p_words, p_labels):
word.next_to(rect, UP)
label.match_color(word)
self.add(test[0])
self.play(
FadeIn(harder_words),
GrowArrow(arrow),
LaggedStart(*[FadeIn(p, UP) for p in p_labels[:3]]),
LaggedStartMap(ShowCreation, rects[:3]),
)
self.wait()
self.play(
FadeIn(test[1]),
FadeIn(p_labels[3:]),
FadeIn(rects[3:]),
FadeOut(harder_words),
FadeOut(arrow),
)
self.wait()
self.add(big_rect, p_labels[0], p_labels[3])
self.play(
FadeIn(big_rect),
FadeOut(rects),
FadeOut(p_labels[1:3]),
FadeOut(p_labels[4:]),
FadeInFromDown(p_words[0::3]),
)
self.wait()
self.play(
Transform(big_rect, big_rects[1]),
FadeOut(p_labels[0::3]),
FadeIn(p_labels[1::3]),
FadeOut(p_words[0::3], DOWN),
FadeIn(p_words[1::3], UP),
)
self.wait()
self.play(
Transform(big_rect, big_rects[2]),
FadeOut(p_labels[1::3]),
FadeIn(p_labels[2::3]),
FadeOut(p_words[1::3], DOWN),
FadeIn(p_words[2::3], UP),
)
self.wait()
class PerfectScoreData(Describe2011IMO):
CONFIG = {
"n_students": 563,
"n_perfect_scores_per_problem": [
345, 22, 51, 267, 170, 6,
],
"full_bar_width": 7,
}
def construct(self):
self.add_title()
self.show_total_number_of_students()
self.add_subtitle()
self.show_data()
self.analyze_data()
def add_title(self):
self.force_skipping()
super().add_title()
self.revert_to_original_skipping_status()
self.title.center().to_edge(UP)
def show_total_number_of_students(self):
title = self.title
bar = self.get_bar(self.n_students, ORIGIN)
bar.next_to(title, DOWN, buff=0.3)
counter = self.get_bar_counter(bar)
counter_label = TexText("Students")
counter_label.add_updater(
lambda m: m.next_to(counter, RIGHT)
)
self.add(counter, counter_label)
self.play(
self.get_bar_growth_anim(bar),
run_time=2,
)
self.wait()
def add_subtitle(self):
title = self.title
subtitle = TexText(
"Number of perfect scores on each problem:"
)
subtitle.scale(1.25)
subtitle.set_color(GREEN)
subtitle.next_to(title, DOWN, buff=LARGE_BUFF)
problems = VGroup(*[
TexText("P{})".format(i))
for i in range(1, 7)
])
problems.arrange_in_grid(n_cols=2, buff=LARGE_BUFF)
problems[3:].shift(5 * RIGHT)
problems.next_to(subtitle, DOWN, LARGE_BUFF)
problems.to_edge(LEFT)
self.play(
FadeInFromDown(subtitle),
LaggedStartMap(FadeInFromDown, problems),
)
self.problems = problems
def show_data(self):
problems = self.problems
bars = VGroup(*[
self.get_bar(n, p.get_right() + SMALL_BUFF * RIGHT)
for n, p in zip(
self.n_perfect_scores_per_problem,
problems,
)
])
counters = VGroup(*map(self.get_bar_counter, bars))
self.play(
VFadeIn(counters),
*[
self.get_bar_growth_anim(bar)
for bar in bars
],
)
counters.set_fill(WHITE, 1)
self.wait()
self.problem_bars = bars
self.problem_counters = counters
def analyze_data(self):
problems = VGroup(*[
VGroup(p, pb, pc)
for p, pb, pc in zip(
self.problems,
self.problem_bars,
self.problem_counters,
)
])
rects = VGroup(*[
SurroundingRectangle(p, color=p[1].get_color())
for p in problems
])
rect = rects[1].copy()
self.play(ShowCreation(rect))
self.wait()
self.play(TransformFromCopy(rect, rects[4]))
self.wait()
self.play(TransformFromCopy(rect, rects[2]))
self.wait()
self.play(
ReplacementTransform(rect, rects[5]),
ReplacementTransform(rects[4], rects[5]),
ReplacementTransform(rects[2], rects[5]),
)
self.wait()
#
def get_bar(self, number, left_side):
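        """
        A horizontal bar whose width is (number / n_students) of
        full_bar_width, recolored along a red-yellow-green-blue
        gradient as it grows.
        """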
bar = Rectangle()
bar.set_stroke(width=0)
bar.set_fill(WHITE, 1)
bar.set_height(0.25)
bar.set_width(
self.full_bar_width * number / self.n_students,
stretch=True
)
bar.move_to(left_side, LEFT)
def update_bar_color(bar):
frac = bar.get_width() / self.full_bar_width
            if frac <= 0.25:
alpha = 4 * frac
bar.set_color(interpolate_color(RED, YELLOW, alpha))
elif 0.25 < frac <= 0.5:
alpha = 4 * (frac - 0.25)
bar.set_color(interpolate_color(YELLOW, GREEN, alpha))
else:
alpha = 2 * (frac - 0.5)
bar.set_color(interpolate_color(GREEN, BLUE, alpha))
bar.add_updater(update_bar_color)
return bar
def get_bar_growth_anim(self, bar):
bar.save_state()
bar.stretch(0, 0, about_edge=LEFT)
return Restore(
bar,
suspend_mobject_updating=False,
run_time=2,
)
def get_bar_counter(self, bar):
counter = Integer()
counter.add_updater(
lambda m: m.set_value(
self.n_students * bar.get_width() / self.full_bar_width
)
)
counter.add_updater(lambda m: m.next_to(bar, RIGHT, SMALL_BUFF))
return counter
class SixOnSix(Describe2011IMO):
CONFIG = {
"student_data": [
[1, "Lisa Sauermann", "de", [7, 7, 7, 7, 7, 7]],
[2, "Jeck Lim", "sg", [7, 5, 7, 7, 7, 7]],
[3, "Lin Chen", "cn", [7, 3, 7, 7, 7, 7]],
[14, "Mina Dalirrooyfard", "ir", [7, 0, 2, 7, 7, 7]],
[202, "Georgios Kalantzis", "gr", [7, 0, 1, 1, 2, 7]],
[202, "Chi Hong Chow", "hk", [7, 0, 3, 1, 0, 7]],
],
}
def construct(self):
grid = self.get_score_grid()
grid.to_edge(DOWN, buff=LARGE_BUFF)
for row in grid.rows:
row[0].set_opacity(0)
grid.h_lines.stretch(0.93, 0, about_edge=RIGHT)
sf = 1.25
title = TexText("Only 6 solved P6")
title.scale(sf)
title.to_edge(UP, buff=MED_SMALL_BUFF)
subtitle = TexText("P2 evaded 5 of them")
subtitle.set_color(YELLOW)
subtitle.scale(sf)
subtitle.next_to(title, DOWN)
six_rect, two_rect = [
SurroundingRectangle(VGroup(
grid.rows[0][index],
grid.rows[-1][index],
))
for index in [7, 3]
]
self.play(
Write(title),
LaggedStart(*[FadeIn(row, UP) for row in grid.rows]),
LaggedStart(*[ShowCreation(line) for line in grid.h_lines]),
)
self.play(ShowCreation(six_rect))
self.wait()
self.play(
ReplacementTransform(six_rect, two_rect),
FadeIn(subtitle, UP)
)
self.wait()
class AlwaysStartSimple(TeacherStudentsScene):
def construct(self):
self.teacher_says("Always start\\\\simple")
self.change_all_student_modes("pondering")
self.wait(3)
class TryOutSimplestExamples(WindmillScene):
CONFIG = {
"windmill_rotation_speed": TAU / 8,
}
def construct(self):
self.two_points()
self.add_third_point()
self.add_fourth_point()
self.move_starting_line()
def two_points(self):
points = [1.5 * LEFT, 1.5 * RIGHT]
dots = self.dots = self.get_dots(points)
windmill = self.windmill = self.get_windmill(points, angle=TAU / 8)
pivot_dot = self.pivot_dot = self.get_pivot_dot(windmill)
self.play(
ShowCreation(windmill),
LaggedStartMap(
FadeInFromLarge, dots,
scale_factor=10,
run_time=1,
lag_ratio=0.4,
),
GrowFromCenter(pivot_dot),
)
self.let_windmill_run(windmill, 8)
def add_third_point(self):
windmill = self.windmill
new_point = 2 * DOWN
new_dot = self.get_dots([new_point])
windmill.point_set.append(new_point)
self.add(new_dot, self.pivot_dot)
self.play(FadeInFromLarge(new_dot, scale_factor=10))
self.let_windmill_run(windmill, 8)
def add_fourth_point(self):
windmill = self.windmill
dot = self.get_dots([ORIGIN])
dot.move_to(DOWN + 2 * RIGHT)
words = TexText("Never hit!")
words.set_color(RED)
words.scale(0.75)
words.move_to(0.7 * DOWN, DOWN)
self.add(dot, self.pivot_dot)
self.play(
FadeInFromLarge(dot, scale_factor=10)
)
windmill.point_set.append(dot.get_center())
windmill.rot_speed = TAU / 4
self.let_windmill_run(windmill, 4)
# Shift point
self.play(
dot.next_to, words, DOWN,
FadeIn(words, RIGHT),
)
windmill.point_set[3] = dot.get_center()
self.let_windmill_run(windmill, 4)
self.wait()
self.dots.add(dot)
self.never_hit_words = words
def move_starting_line(self):
windmill = self.windmill
dots = self.dots
windmill.suspend_updating()
self.play(
windmill.move_to, dots[-1],
FadeOut(self.never_hit_words),
)
windmill.pivot = dots[-1].get_center()
windmill.resume_updating()
counters = self.get_pivot_counters(windmill)
self.play(
LaggedStart(*[
FadeIn(counter, DOWN)
for counter in counters
])
)
self.wait()
windmill.rot_speed = TAU / 8
self.let_windmill_run(windmill, 16)
highlight = windmill.copy()
highlight.set_stroke(YELLOW, 4)
self.play(
ShowCreationThenDestruction(highlight),
)
self.let_windmill_run(windmill, 8)
class FearedCase(WindmillScene):
CONFIG = {
"n_points": 25,
"windmill_rotation_speed": TAU / 16,
}
def construct(self):
points = self.get_random_point_set(self.n_points)
sorted_points = sorted(list(points), key=lambda p: p[1])
dots = self.get_dots(points)
windmill = self.get_windmill(
points,
sorted_points[self.n_points // 2],
angle=0,
)
pivot_dot = self.get_pivot_dot(windmill)
# self.add_dot_color_updater(dots, windmill)
counters = self.get_pivot_counters(
windmill,
counter_height=0.15,
buff=0.1
)
self.add(windmill)
self.add(dots)
self.add(pivot_dot)
self.add(counters)
self.let_windmill_run(windmill, 32)
windmill.pivot = sorted_points[0]
self.let_windmill_run(windmill, 32)
class WhereItStartsItEnds(WindmillScene):
CONFIG = {
"n_points": 11,
"windmill_rotation_speed": TAU / 8,
"random_seed": 1,
"points_shift_val": 2 * LEFT,
}
def construct(self):
self.show_stays_in_middle()
self.ask_about_proof()
def show_stays_in_middle(self):
points = self.get_random_point_set(self.n_points)
points += self.points_shift_val
sorted_points = sorted(list(points), key=lambda p: p[1])
dots = self.get_dots(points)
windmill = self.get_windmill(
points,
sorted_points[self.n_points // 2],
angle=0
)
pivot_dot = self.get_pivot_dot(windmill)
sf = 1.25
start_words = TexText("Starts in the ", "``middle''")
start_words.scale(sf)
start_words.next_to(windmill, UP, MED_SMALL_BUFF)
start_words.to_edge(RIGHT)
end_words = TexText("Stays in the ", "``middle''")
end_words.scale(sf)
end_words.next_to(windmill, DOWN, MED_SMALL_BUFF)
end_words.to_edge(RIGHT)
start_words.match_x(end_words)
self.add(dots)
self.play(
ShowCreation(windmill),
GrowFromCenter(pivot_dot),
FadeIn(start_words, LEFT),
)
self.wait()
self.start_leaving_shadows()
self.add(windmill, dots, pivot_dot)
half_time = PI / windmill.rot_speed
self.let_windmill_run(windmill, time=half_time)
self.play(FadeIn(end_words, UP))
self.wait()
self.let_windmill_run(windmill, time=half_time)
self.wait()
self.start_words = start_words
self.end_words = end_words
self.windmill = windmill
self.dots = dots
self.pivot_dot = pivot_dot
def ask_about_proof(self):
sf = 1.25
middle_rects = self.get_middle_rects()
middle_words = TexText("Can you formalize this?")
middle_words.scale(sf)
middle_words.next_to(middle_rects, DOWN, MED_LARGE_BUFF)
middle_words.to_edge(RIGHT)
middle_words.match_color(middle_rects)
proof_words = TexText("Can you prove this?")
proof_words.next_to(
self.end_words.get_left(),
DL,
buff=2,
)
proof_words.shift(RIGHT)
proof_words.scale(sf)
proof_arrow = Arrow(
proof_words.get_top(),
self.end_words.get_corner(DL),
buff=SMALL_BUFF,
)
proof_words2 = TexText("Then prove the result?")
proof_words2.scale(sf)
proof_words2.next_to(middle_words, DOWN, MED_LARGE_BUFF)
proof_words2.to_edge(RIGHT)
VGroup(proof_words, proof_words2, proof_arrow).set_color(YELLOW)
self.play(
Write(proof_words),
GrowArrow(proof_arrow),
run_time=1,
)
self.wait()
self.play(
FadeOut(proof_arrow),
FadeOut(proof_words),
LaggedStartMap(ShowCreation, middle_rects),
Write(middle_words),
)
self.wait()
self.play(FadeIn(proof_words2, UP))
self.wait()
self.let_windmill_run(self.windmill, time=10)
def get_middle_rects(self):
middle_rects = VGroup(*[
SurroundingRectangle(words[1])
for words in [
self.start_words,
self.end_words
]
])
middle_rects.set_color(TEAL)
return middle_rects
class AltWhereItStartsItEnds(WhereItStartsItEnds):
CONFIG = {
"n_points": 9,
"random_seed": 3,
}
class FormalizeMiddle(WhereItStartsItEnds):
CONFIG = {
"random_seed": 2,
"points_shift_val": 3 * LEFT,
}
def construct(self):
self.show_stays_in_middle()
self.problem_solving_tip()
self.define_colors()
self.mention_odd_case()
self.ask_about_numbers()
def problem_solving_tip(self):
mid_words = VGroup(
self.start_words,
self.end_words,
)
mid_words.save_state()
sf = 1.25
pst = TexText("Problem-solving tip:")
pst.scale(sf)
underline = Line(LEFT, RIGHT)
underline.match_width(pst)
underline.move_to(pst.get_bottom())
pst.add(underline)
pst.to_corner(UR)
steps = VGroup(
TexText("Vague idea"),
TexText("Put numbers to it"),
TexText("Ask about those numbers"),
)
steps.scale(sf)
steps.arrange(DOWN, buff=LARGE_BUFF)
steps.next_to(pst, DOWN, buff=MED_LARGE_BUFF)
steps.shift_onto_screen()
pst.match_x(steps)
colors = color_gradient([BLUE, YELLOW], 3)
for step, color in zip(steps, colors):
step.set_color(color)
arrows = VGroup()
for s1, s2 in zip(steps, steps[1:]):
arrow = Arrow(s1.get_bottom(), s2.get_top(), buff=SMALL_BUFF)
arrows.add(arrow)
self.play(Write(pst), run_time=1)
self.wait()
self.play(
mid_words.scale, 0.75,
mid_words.set_opacity, 0.25,
mid_words.to_corner, DL,
FadeInFromDown(steps[0]),
)
self.wait()
for arrow, step in zip(arrows, steps[1:]):
self.play(
FadeIn(step, UP),
GrowArrow(arrow),
)
self.wait()
steps.generate_target()
steps.target.scale(0.75)
steps.target.arrange(DOWN, buff=0.2)
steps.target.to_corner(UR)
self.play(
FadeOut(pst),
MoveToTarget(steps),
Restore(mid_words),
FadeOut(arrows)
)
self.wait()
self.tip_words = steps
self.mid_words = mid_words
def define_colors(self):
windmill = self.windmill
mid_words = self.mid_words
tip_words = self.tip_words
shadows = self.windmill_shadows
self.leave_shadows = False
full_time = TAU / windmill.rot_speed
self.play(FadeOut(shadows))
self.add(windmill, tip_words, mid_words, self.dots, self.pivot_dot)
self.let_windmill_run(windmill, time=full_time / 4)
windmill.rotate(PI)
self.wait()
# Show regions
rects = self.get_left_right_colorings(windmill)
rects.suspend_updating()
rects.save_state()
rects.stretch(0, 0, about_point=windmill.get_center())
counters = VGroup(Integer(0), Integer(0))
counters.scale(2)
counters[0].set_stroke(BLUE, 3, background=True)
counters[1].set_stroke(GREY_BROWN, 3, background=True)
new_dots = self.dots.copy()
new_dots.set_color(WHITE)
for dot in new_dots:
dot.scale(1.25)
new_dots.sort(lambda p: p[0])
k = self.n_points // 2
dot_sets = VGroup(new_dots[:k], new_dots[-k:])
label_sets = VGroup()
for dot_set, direction in zip(dot_sets, [LEFT, RIGHT]):
label_set = VGroup()
for i, dot in zip(it.count(1), dot_set):
label = Integer(i)
label.set_height(0.15)
label.next_to(dot, direction, SMALL_BUFF)
label_set.add(label)
label_sets.add(label_set)
for counter, dot_set in zip(counters, dot_sets):
counter.move_to(dot_set)
counter.to_edge(UP)
self.add(rects, *self.get_mobjects())
self.play(
Restore(rects),
FadeIn(counters),
)
for counter, dot_set, label_set in zip(counters, dot_sets, label_sets):
self.play(
ShowIncreasingSubsets(dot_set),
ShowIncreasingSubsets(label_set),
ChangingDecimal(counter, lambda a: len(dot_set)),
rate_func=linear,
)
self.wait()
self.wait()
self.remove(self.dots)
self.dots = new_dots
# Show orientation
tips = self.get_orientation_arrows(windmill)
self.play(ShowCreation(tips))
windmill.add(tips)
self.wait()
self.add_dot_color_updater(new_dots, windmill)
rects.suspend_updating()
for rect in rects:
self.play(rect.set_opacity, 1)
self.play(rect.set_opacity, rects.const_opacity)
rects.resume_updating()
self.wait()
self.play(
counters.space_out_submobjects, 0.8,
counters.next_to, mid_words, DOWN, LARGE_BUFF,
FadeOut(label_sets),
)
eq = Tex("=")
eq.scale(2)
eq.move_to(counters)
self.play(FadeIn(eq))
self.wait()
self.counters = counters
self.colored_regions = rects
rects.resume_updating()
def mention_odd_case(self):
dots = self.dots
counters = self.counters
sf = 1.0
words = TexText(
"Assume odd \\# points"
)
words.scale(sf)
words.to_corner(UL)
example = VGroup(
TexText("Example:"),
Integer(0)
)
example.arrange(RIGHT)
example.scale(sf)
example.next_to(words, DOWN)
example.align_to(words, LEFT)
k = self.n_points // 2
dot_rects = VGroup()
        for dot in dots:
dot_rect = SurroundingRectangle(dot)
dot_rect.match_color(dot)
dot_rects.add(dot_rect)
self.play(FadeIn(words, DOWN))
self.wait()
self.play(
ShowCreationThenFadeAround(dots[k]),
self.pivot_dot.set_color, WHITE,
)
self.play(FadeIn(example, UP))
self.play(
ShowIncreasingSubsets(dot_rects),
ChangingDecimal(
example[1],
lambda a: len(dot_rects)
),
rate_func=linear
)
self.wait()
self.remove(dot_rects)
self.play(
ShowCreationThenFadeOut(dot_rects[:k]),
ShowCreationThenFadeOut(
SurroundingRectangle(counters[0], color=BLUE)
),
)
self.play(
ShowCreationThenFadeOut(dot_rects[-k:]),
ShowCreationThenFadeOut(
SurroundingRectangle(counters[1], color=GREY_BROWN)
),
)
self.wait()
self.play(
FadeOut(words),
FadeOut(example),
)
def ask_about_numbers(self):
self.windmill.rot_speed *= 0.5
self.add(self.dots, self.pivot_dot)
self.let_windmill_run(self.windmill, 20)
class SecondColoringExample(WindmillScene):
CONFIG = {
"run_time": 30,
"n_points": 9,
}
def construct(self):
points = self.get_random_point_set(self.n_points)
points += RIGHT
sorted_points = sorted(list(points), key=lambda p: p[0])
dots = self.get_dots(points)
windmill = self.get_windmill(
points,
pivot=sorted_points[self.n_points // 2],
angle=PI / 2
)
pivot_dot = self.get_pivot_dot(windmill)
pivot_dot.set_color(WHITE)
rects = self.get_left_right_colorings(windmill)
self.add_dot_color_updater(dots, windmill)
counts = VGroup(
TexText("\\# Blues = 4"),
TexText("\\# Browns = 4"),
)
counts.arrange(DOWN, aligned_edge=LEFT, buff=MED_LARGE_BUFF)
counts.to_corner(UL)
counts[0].set_color(interpolate_color(BLUE, WHITE, 0.25))
counts[1].set_color(interpolate_color(GREY_BROWN, WHITE, 0.5))
counts[0].set_stroke(BLACK, 5, background=True)
counts[1].set_stroke(BLACK, 5, background=True)
const_words = TexText("Stay constant$\\dots$why?")
const_words.next_to(counts, RIGHT, buff=1.5, aligned_edge=UP)
arrows = VGroup(*[
Arrow(
const_words.get_left(),
count.get_right(),
buff=SMALL_BUFF,
max_tip_length_to_length_ratio=0.15,
max_stroke_width_to_length_ratio=3,
)
for count in counts
])
self.add(rects, windmill, dots, pivot_dot)
self.add(counts, const_words, arrows)
self.let_windmill_run(windmill, time=self.run_time)
class TalkThroughPivotChange(WindmillScene):
CONFIG = {
"windmill_rotation_speed": 0.2,
}
def construct(self):
self.setup_windmill()
self.ask_about_pivot_change()
self.show_above_and_below()
self.change_pivot()
def setup_windmill(self):
points = self.points = np.array([
DR, UR, UL, DL, 0.5 * LEFT
])
points *= 3
self.dots = self.get_dots(points)
self.windmill = self.get_windmill(points, points[-1])
self.pivot_dot = self.get_pivot_dot(self.windmill)
self.pivot_dot.set_color(WHITE)
self.add_dot_color_updater(self.dots, self.windmill)
self.rects = self.get_left_right_colorings(self.windmill)
self.add(
self.rects,
self.windmill,
self.dots,
self.pivot_dot,
)
def ask_about_pivot_change(self):
windmill = self.windmill
new_pivot, angle = self.next_pivot_and_angle(windmill)
words = TexText("Think about\\\\pivot change")
words.next_to(new_pivot, UP, buff=2)
words.to_edge(LEFT)
arrow = Arrow(words.get_bottom(), new_pivot, buff=0.2)
self.play(
Rotate(
windmill, -0.9 * angle,
run_time=3,
rate_func=linear
),
Write(words, run_time=1),
ShowCreation(arrow),
)
self.wait()
self.question = words
self.question_arrow = arrow
def show_above_and_below(self):
windmill = self.windmill
vect = normalize(windmill.get_vector())
angle = windmill.get_angle()
tips = self.get_orientation_arrows(windmill)
top_half = Line(windmill.get_center(), windmill.get_end())
low_half = Line(windmill.get_center(), windmill.get_start())
top_half.set_stroke(YELLOW, 3)
low_half.set_stroke(PINK, 3)
halves = VGroup(top_half, low_half)
top_words = TexText("Above pivot")
low_words = TexText("Below pivot")
all_words = VGroup(top_words, low_words)
for words, half in zip(all_words, halves):
words.next_to(ORIGIN, DOWN)
words.rotate(angle, about_point=ORIGIN)
words.shift(half.point_from_proportion(0.15))
words.match_color(half)
self.play(ShowCreation(tips))
self.wait()
self.add(top_half, tips)
self.play(
ShowCreationThenFadeOut(top_half),
FadeIn(top_words, -vect),
)
self.add(low_half, tips)
self.play(
ShowCreationThenFadeOut(low_half),
FadeIn(low_words, vect),
)
self.wait()
windmill.add(tips)
self.above_below_words = all_words
def change_pivot(self):
windmill = self.windmill
dots = self.dots
arrow = self.question_arrow
blue_rect = SurroundingRectangle(dots[3])
blue_rect.set_color(BLUE)
new_pivot_word = TexText("New pivot")
new_pivot_word.next_to(blue_rect, LEFT)
old_pivot_word = TexText("Old pivot")
old_pivot = windmill.pivot
old_pivot_word.next_to(
old_pivot, LEFT,
buff=SMALL_BUFF + MED_SMALL_BUFF
)
self.play(
FadeOut(self.above_below_words),
ReplacementTransform(
self.question,
new_pivot_word,
),
ReplacementTransform(arrow, blue_rect),
)
self.wait()
anims, time = self.rotate_to_next_pivot(windmill)
self.play(
*anims,
Rotate(
windmill,
angle=-windmill.rot_speed,
rate_func=linear,
)
)
self.wait()
self.play(
TransformFromCopy(new_pivot_word, old_pivot_word),
blue_rect.move_to, old_pivot,
)
self.wait(2)
# Hit new point
brown_rect = SurroundingRectangle(dots[1])
brown_rect.set_color(GREY_BROWN)
self.play(TransformFromCopy(blue_rect, brown_rect))
self.play(
blue_rect.move_to, windmill.pivot,
blue_rect.set_color, GREY_BROWN,
old_pivot_word.move_to, new_pivot_word,
FadeOut(new_pivot_word, DL)
)
self.let_windmill_run(windmill, 1)
self.wait()
self.play(
FadeOut(old_pivot_word),
FadeOut(blue_rect),
FadeOut(brown_rect),
)
self.let_windmill_run(windmill, 20)
class InsightNumber1(Scene):
def construct(self):
words = TexText(
"Key insight 1: ",
"\\# Points on either side is constant"
)
words[0].set_color(YELLOW)
words.set_width(FRAME_WIDTH - 1)
self.play(FadeInFromDown(words))
self.wait()
class Rotate180Argument(WindmillScene):
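    """
    Key step of the solution: start with a pivot chosen so the points
    split evenly. After the line turns through 180 degrees in total it
    is parallel to its starting position with orientation reversed, so
    every point has switched sides -- and a point can only switch sides
    by serving as the pivot at some moment.
    """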
CONFIG = {
"n_points": 21,
"random_seed": 3,
}
def construct(self):
self.setup_windmill()
self.add_total_rotation_label()
self.rotate_180()
self.show_parallel_lines()
self.rotate_180()
self.rotate_180()
def setup_windmill(self):
n = self.n_points
points = self.get_random_point_set(n)
points[:, 0] *= 1.5
points += RIGHT
points = sorted(points, key=lambda p: p[0])
mid_point = points[n // 2]
points[n // 2 - 1] += 0.2 * LEFT
self.points = points
self.dots = self.get_dots(points)
self.windmill = self.get_windmill(points, mid_point)
self.pivot_dot = self.get_pivot_dot(self.windmill)
self.pivot_dot.set_color(WHITE)
self.add_dot_color_updater(self.dots, self.windmill)
self.rects = self.get_left_right_colorings(self.windmill)
p_label = Tex("P_0")
p_label.next_to(mid_point, RIGHT, SMALL_BUFF)
self.p_label = p_label
self.add(
self.rects,
self.windmill,
self.dots,
self.pivot_dot,
self.p_label,
)
def add_total_rotation_label(self):
windmill = self.windmill
words = TexText("Total rotation:")
counter = Integer(0, unit="^\\circ")
title = VGroup(words, counter)
title.arrange(RIGHT)
title.to_corner(UL)
rot_arrow = Vector(UP)
rot_arrow.set_color(RED)
rot_arrow.next_to(title, DOWN)
circle = Circle()
circle.replace(rot_arrow, dim_to_match=1)
circle.set_stroke(WHITE, 1)
rot_arrow.add_updater(
lambda m: m.set_angle(windmill.get_angle())
)
rot_arrow.add_updater(
lambda m: m.move_to(circle)
)
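        # windmill.get_angle() is only known mod TAU, so the counter
        # adds whole turns until the displayed total changes continuously.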
def update_count(c):
new_val = 90 - windmill.get_angle() * 360 / TAU
while abs(new_val - c.get_value()) > 90:
new_val += 360
c.set_value(new_val)
counter.add_updater(update_count)
rect = SurroundingRectangle(
VGroup(title, circle),
buff=MED_LARGE_BUFF,
)
rect.set_fill(BLACK, 0.8)
rect.set_stroke(WHITE, 1)
title.shift(MED_SMALL_BUFF * LEFT)
self.rotation_label = VGroup(
rect, words, counter, circle, rot_arrow
)
self.add(self.rotation_label)
def rotate_180(self):
windmill = self.windmill
self.add(self.pivot_dot)
self.let_windmill_run(
windmill,
PI / windmill.rot_speed,
)
self.wait()
def show_parallel_lines(self):
        points = self.points
rotation_label = self.rotation_label
dots = self.dots
windmill = self.windmill
lines = VGroup()
for point in points:
line = Line(DOWN, UP)
line.set_height(2 * FRAME_HEIGHT)
line.set_stroke(RED, 1, opacity=0.5)
line.move_to(point)
lines.add(line)
lines.shuffle()
self.add(lines, dots, rotation_label)
self.play(
ShowCreation(lines, lag_ratio=0.5, run_time=3)
)
self.wait()
self.rects.suspend_updating()
for rect in self.rects:
self.play(
rect.set_opacity, 0,
rate_func=there_and_back,
run_time=2
)
self.rects.resume_updating()
self.wait()
pivot_tracker = VectorizedPoint(windmill.pivot)
pivot_tracker.save_state()
def update_pivot(w):
w.pivot = pivot_tracker.get_center()
windmill.add_updater(update_pivot)
for x in range(4):
point = random.choice(points)
self.play(
pivot_tracker.move_to, point
)
self.wait()
self.play(Restore(pivot_tracker))
self.play(FadeOut(lines))
windmill.remove_updater(update_pivot)
self.wait()
class Rotate180ArgumentFast(Rotate180Argument):
CONFIG = {
"windmill_rotation_speed": 0.5,
}
class EvenCase(Rotate180Argument):
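    """
    The same 180-degree argument with an even number of points: the
    pivot is counted together with the brown side, so the remaining
    points still split half and half.
    """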
CONFIG = {
"n_points": 10,
"dot_config": {"radius": 0.075},
}
def construct(self):
self.ask_about_even_number()
self.choose_halfway_point()
self.add_total_rotation_label()
self.rotate_180()
self.rotate_180()
self.show_parallel_lines()
self.rotate_180()
self.rotate_180()
def ask_about_even_number(self):
n = self.n_points
points = self.get_random_point_set(n)
points[:, 0] *= 2
points += DOWN
points = sorted(points, key=lambda p: p[0])
dots = self.get_dots(points)
windmill = self.get_windmill(points, points[3])
region_rects = self.rects = self.get_left_right_colorings(windmill)
pivot_dot = self.get_pivot_dot(windmill)
pivot_dot.set_color(WHITE)
dot_rects = VGroup(*map(SurroundingRectangle, dots))
question = TexText("What about an even number?")
question.to_edge(UP)
counter_label = TexText("\\# Points", ":")
counter = Integer(0)
counter_group = VGroup(counter_label, counter)
counter_group.arrange(RIGHT)
counter.align_to(counter_label[1], DOWN)
counter_group.next_to(question, DOWN, MED_LARGE_BUFF)
counter_group.set_color(YELLOW)
self.add(question, counter_label)
self.add(windmill, dots, pivot_dot)
self.add_dot_color_updater(dots, windmill)
self.add(region_rects, question, counter_group, windmill, dots, pivot_dot)
self.play(
ShowIncreasingSubsets(dot_rects),
ChangingDecimal(counter, lambda a: len(dot_rects)),
rate_func=linear
)
self.play(FadeOut(dot_rects))
self.wait()
# Count by color
blue_rects = dot_rects[:3]
blue_rects.set_color(BLUE)
brown_rects = dot_rects[4:]
brown_rects.set_color(GREY_BROWN)
pivot_rect = dot_rects[3]
pivot_rect.set_color(GREY_BROWN)
blues_label = TexText("\\# Blues", ":")
blues_counter = Integer(len(blue_rects))
blues_group = VGroup(blues_label, blues_counter)
blues_group.set_color(BLUE)
browns_label = TexText("\\# Browns", ":")
browns_counter = Integer(len(brown_rects))
browns_group = VGroup(browns_label, browns_counter)
browns_group.set_color(interpolate_color(GREY_BROWN, WHITE, 0.5))
groups = VGroup(blues_group, browns_group)
for group in groups:
group.arrange(RIGHT)
group[-1].align_to(group[0][-1], DOWN)
groups.arrange(DOWN, aligned_edge=LEFT)
groups.next_to(counter_group, DOWN, aligned_edge=LEFT)
self.play(
FadeIn(blues_group, UP),
ShowCreation(blue_rects),
)
self.play(
FadeIn(browns_group, UP),
ShowCreation(brown_rects),
)
self.wait()
# Pivot counts as brown
pivot_words = TexText("Pivot counts as brown")
arrow = Vector(LEFT)
arrow.next_to(pivot_dot, RIGHT, SMALL_BUFF)
pivot_words.next_to(arrow, RIGHT, SMALL_BUFF)
self.play(
FadeIn(pivot_words, LEFT),
ShowCreation(arrow),
)
self.play(
ShowCreation(pivot_rect),
ChangeDecimalToValue(browns_counter, len(brown_rects) + 1),
FadeOut(pivot_dot),
)
self.wait()
self.play(
FadeOut(dot_rects),
FadeOut(pivot_words),
FadeOut(arrow),
)
self.wait()
blues_counter.add_updater(
lambda c: c.set_value(len(list(filter(
lambda d: d.get_fill_color() == Color(BLUE),
dots
))))
)
browns_counter.add_updater(
lambda c: c.set_value(len(list(filter(
lambda d: d.get_fill_color() == Color(GREY_BROWN),
dots
))))
)
self.windmill = windmill
self.dots = dots
self.points = points
self.question = question
self.counter_group = VGroup(
counter_group,
blues_group,
browns_group,
)
def choose_halfway_point(self):
windmill = self.windmill
points = self.points
n = self.n_points
p_label = Tex("P_0")
p_label.next_to(points[n // 2], RIGHT, SMALL_BUFF)
pivot_tracker = VectorizedPoint(windmill.pivot)
def update_pivot(w):
w.pivot = pivot_tracker.get_center()
windmill.add_updater(update_pivot)
self.play(
pivot_tracker.move_to, points[n // 2],
run_time=2
)
self.play(FadeIn(p_label, LEFT))
self.wait()
windmill.remove_updater(update_pivot)
def add_total_rotation_label(self):
super().add_total_rotation_label()
self.rotation_label.scale(0.8, about_edge=UL)
self.play(
FadeOut(self.question),
FadeIn(self.rotation_label),
self.counter_group.to_edge, UP,
)
class TwoTakeaways(TeacherStudentsScene):
def construct(self):
title = TexText("Two takeaways")
title.scale(2)
title.to_edge(UP)
line = Line()
line.match_width(title)
line.next_to(title, DOWN, SMALL_BUFF)
items = VGroup(*[
TexText("1) Social"),
TexText("2) Mathematical"),
])
items.scale(1.5)
items.arrange(DOWN, buff=MED_LARGE_BUFF, aligned_edge=LEFT)
items.next_to(line, DOWN, buff=MED_LARGE_BUFF)
self.play(
ShowCreation(line),
GrowFromPoint(title, self.hold_up_spot),
self.teacher.change, "raise_right_hand",
)
self.change_all_student_modes("pondering")
self.wait()
for item in items:
self.play(FadeIn(item, LEFT))
item.big = item.copy()
item.small = item.copy()
item.big.scale(1.5, about_edge=LEFT)
item.big.set_color(BLUE)
item.small.scale(0.75, about_edge=LEFT)
item.small.fade(0.5)
self.play(self.teacher.change, "happy")
self.wait()
for i, j in [(0, 1), (1, 0)]:
self.play(
items[i].become, items[i].big,
items[j].become, items[j].small,
)
self.wait()
class EasyToFoolYourself(PiCreatureScene):
CONFIG = {
"default_pi_creature_kwargs": {
"color": GREY_BROWN,
}
}
def construct(self):
morty = self.pi_creature
morty.to_corner(DL)
bubble = ThoughtBubble()
for i, part in enumerate(bubble):
part.shift(2 * i * SMALL_BUFF * DOWN)
bubble.pin_to(morty)
fool_word = TexText("Fool")
fool_word.scale(1.5)
fool_arrow = Vector(LEFT)
fool_arrow.next_to(morty, RIGHT, buff=0)
fool_word.next_to(fool_arrow, RIGHT)
self.add(morty)
self.play(
ShowCreation(bubble),
morty.change, "pondering",
)
self.play(
bubble[3].set_fill, GREEN_SCREEN, 0.5,
)
self.wait()
self.play(morty.change, "thinking")
self.play(
FadeIn(fool_word, LEFT),
ShowCreation(fool_arrow),
)
self.wait()
self.pi_creature_says(
"Isn't it\\\\obvious?",
target_mode="maybe",
added_anims=[FadeOut(bubble)]
)
self.wait(4)
#
words = TexText("No it's not!")
words.scale(1.5)
words.set_color(RED)
words.next_to(morty.bubble, RIGHT, LARGE_BUFF)
words.match_y(morty.bubble.content)
self.play(
FadeInFromLarge(words),
morty.change, "guilty",
)
self.wait()
class FailureToEmpathize(PiCreatureScene):
def construct(self):
randy, morty = self.pi_creatures
# What a mess...
big_bubble = ThoughtBubble(height=4, width=5)
big_bubble.scale(1.75)
big_bubble.flip(UR)
for part in big_bubble:
part.rotate(90 * DEGREES)
big_bubble[:3].rotate(-30 * DEGREES)
for i, part in enumerate(big_bubble[:3]):
part.rotate(30 * DEGREES)
part.shift((3 - i) * SMALL_BUFF * DOWN)
big_bubble[0].shift(MED_SMALL_BUFF * RIGHT)
big_bubble[:3].next_to(big_bubble[3], LEFT)
big_bubble[:3].shift(0.3 * DOWN)
big_bubble.set_fill(GREY_E)
big_bubble.to_corner(UR)
equation = Tex(
"\\sum_{k=1}^n (2k - 1) = n^2"
)
self.pi_creature_thinks(
randy, equation,
target_mode="confused",
look_at_arg=equation,
)
randy_group = VGroup(
randy, randy.bubble,
randy.bubble.content
)
self.wait()
self.play(
DrawBorderThenFill(big_bubble),
morty.change, "confused",
randy_group.scale, 0.5,
randy_group.move_to, big_bubble.get_bubble_center(),
randy_group.shift, 0.5 * DOWN + RIGHT,
)
self.wait()
self.play(morty.change, "maybe")
self.wait(2)
# Zoom out
morty_group = VGroup(morty, big_bubble)
ap = 5 * RIGHT + 2.5 * UP
self.add(morty_group, randy_group)
self.play(
morty_group.scale, 2, {"about_point": ap},
morty_group.fade, 1,
randy_group.scale, 2, {"about_point": ap},
run_time=2
)
self.wait()
def create_pi_creatures(self):
randy = Randolph()
morty = Mortimer()
randy.flip().to_corner(DR)
morty.flip().to_corner(DL)
return (randy, morty)
class DifficultyEstimateVsReality(Scene):
def construct(self):
axes = Axes(
x_min=-1,
x_max=10,
x_axis_config={
"include_tip": False,
},
y_min=-1,
y_max=5,
)
axes.set_height(FRAME_HEIGHT - 1)
axes.center()
axes.x_axis.tick_marks.set_opacity(0)
y_label = TexText("Average score")
y_label.scale(1.25)
y_label.rotate(90 * DEGREES)
y_label.next_to(axes.y_axis, LEFT, SMALL_BUFF)
y_label.shift(UP)
estimated = [1.8, 2.6, 3, 4, 5]
actual = [1.5, 0.5, 1, 1.2, 1.8]
colors = [GREEN, RED]
estimated_color, actual_color = colors
estimated_bars = VGroup()
actual_bars = VGroup()
bar_pairs = VGroup()
width = 0.25
for a, e in zip(actual, estimated):
bars = VGroup(
Rectangle(width=width, height=e),
Rectangle(width=width, height=a),
)
bars.set_stroke(width=1)
bars[0].set_fill(estimated_color, 0.75)
bars[1].set_fill(actual_color, 0.75)
bars.arrange(RIGHT, buff=0, aligned_edge=DOWN)
bar_pairs.add(bars)
estimated_bars.add(bars[0])
actual_bars.add(bars[1])
bar_pairs.arrange(RIGHT, buff=1.5, aligned_edge=DOWN)
bar_pairs.move_to(axes.c2p(5, 0), DOWN)
for bp in bar_pairs:
for bar in bp:
bar.save_state()
bar.stretch(0, 1, about_edge=DOWN)
x_labels = VGroup(*[
TexText("Q{}".format(i)).next_to(bp, DOWN)
for i, bp in zip(it.count(1), bar_pairs)
])
data_labels = VGroup(
TexText("Estimated average"),
TexText("Actual average"),
)
data_labels.arrange(DOWN, buff=MED_LARGE_BUFF, aligned_edge=LEFT)
data_labels.to_edge(UP)
for color, label in zip(colors, data_labels):
square = Square()
square.set_height(0.5)
square.set_fill(color, 0.75)
square.set_stroke(WHITE, 1)
square.next_to(label, LEFT, SMALL_BUFF)
label.add(square)
self.play(Write(axes))
self.play(Write(y_label))
self.play(
LaggedStartMap(
FadeInFrom, x_labels,
lambda m: (m, UP),
run_time=2,
),
LaggedStartMap(
Restore,
estimated_bars,
run_time=3,
),
FadeIn(data_labels[0]),
)
self.wait()
self.play(
LaggedStartMap(
Restore,
actual_bars,
run_time=3,
),
FadeIn(data_labels[1]),
)
self.wait()
class KeepInMindWhenTeaching(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"I don't know\\\\what you know!",
target_mode="pleading"
)
self.wait(2)
self.play(
PiCreatureSays(
self.students[0], "We know",
target_mode="hooray",
),
self.students[1].change, "happy",
self.students[2].change, "happy",
)
self.wait(2)
class VastSpaceOfConsiderations(Scene):
def construct(self):
considerations = VGroup(*[
TexText(phrase)
for phrase in [
"Define ``outer'' points",
"Convex hulls",
"Linear equations",
"Sort points by when they're hit",
"Sort points by some kind of angle?",
"How does this permute the $n \\choose 2$ lines through pairs?",
"Some points are hit more than others, can we quantify this?",
]
])
considerations.arrange(DOWN, buff=MED_LARGE_BUFF, aligned_edge=LEFT)
considerations.to_edge(LEFT)
self.play(LaggedStart(*[
FadeIn(mob, UP)
for mob in considerations
], run_time=3, lag_ratio=0.2))
class WhatStaysConstantWrapper(Scene):
CONFIG = {
"camera_config": {
"background_color": GREY_E
}
}
def construct(self):
rect = ScreenRectangle()
rect.set_height(6)
rect.set_stroke(WHITE, 2)
rect.set_fill(BLACK, 1)
title1 = TexText("What stays constant?")
title2 = TexText("Find an ", "``invariant''")
title2[1].set_color(YELLOW)
for title in [title1, title2]:
title.scale(2)
title.to_edge(UP)
rect.next_to(title1, DOWN)
self.add(rect)
self.play(FadeInFromDown(title1))
self.wait()
self.play(
FadeOut(title1, UP),
FadeInFromDown(title2),
)
self.wait()
class CountHoles(Scene):
def construct(self):
labels = VGroup(
TexText("Genus ", "0"),
TexText("Genus ", "1"),
TexText("Genus ", "2"),
)
labels.scale(2)
labels.arrange(RIGHT, buff=1.5)
labels.move_to(2 * DOWN)
equation = Tex("y^2 = x^3 + ax + b")
equation.scale(1.5)
equation.shift(UP)
equation.to_edge(LEFT)
# arrow = Tex("\\approx").scale(2)
arrow = Vector(2 * RIGHT)
arrow.next_to(equation, RIGHT)
equation_text = TexText("Some other problem")
equation_text.next_to(equation, DOWN, MED_LARGE_BUFF)
equation_text.match_width(equation)
equation_text.set_color(YELLOW)
self.play(LaggedStartMap(
FadeInFromDown, labels,
lag_ratio=0.5,
))
self.wait()
self.play(
labels[1].shift, 4 * RIGHT,
FadeOut(labels[0::2]),
)
self.play(
FadeIn(equation, RIGHT),
GrowArrow(arrow),
)
self.play(FadeIn(equation_text, UP))
self.wait()
class LorentzTransform(Scene):
def construct(self):
grid = NumberPlane(
# faded_line_ratio=0,
# y_axis_config={
# "y_min": -10,
# "y_max": 10,
# }
)
grid.scale(2)
back_grid = grid.copy()
back_grid.set_stroke(GREY, 0.5)
# back_grid.set_opacity(0.5)
c_lines = VGroup(Line(DL, UR), Line(DR, UL))
c_lines.scale(FRAME_HEIGHT)
c_lines.set_stroke(YELLOW, 3)
equation = Tex(
"d\\tau^2 = dt^2 - dx^2"
)
equation.scale(1.7)
equation.to_corner(UL, buff=MED_SMALL_BUFF)
equation.shift(2.75 * DOWN)
equation.set_stroke(BLACK, 5, background=True)
self.add(back_grid, grid, c_lines)
self.add(equation)
beta = 0.4
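        # Note: the exact Lorentz boost (c = 1) is gamma * [[1, beta], [beta, 1]]
        # with gamma = 1 / sqrt(1 - beta**2); the 1 / (1 - beta**2) factor used
        # below exaggerates the shear, so the picture is illustrative, not exact.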
self.play(
grid.apply_matrix, np.array([
[1, beta],
[beta, 1],
]) / (1 - beta**2),
run_time=2
)
self.wait()
class OnceACleverDiscovery(Scene):
def construct(self):
energy = TexText("energy")
rect = SurroundingRectangle(energy)
words = TexText("Once a clever discovery")
vect = Vector(DR)
vect.next_to(rect.get_top(), UL, SMALL_BUFF)
words.next_to(vect.get_start(), UP)
words.set_color(YELLOW)
vect.set_color(YELLOW)
self.play(
ShowCreation(vect),
ShowCreation(rect),
)
self.play(FadeInFromDown(words))
self.wait()
class TerryTaoQuote(Scene):
def construct(self):
image = ImageMobject("TerryTao")
image.set_height(4)
name = TexText("Terence Tao")
name.scale(1.5)
name.next_to(image, DOWN, buff=0.2)
tao = Group(image, name)
tao.to_corner(DL, buff=MED_SMALL_BUFF)
tiny_tao = ImageMobject("TerryTaoIMO")
tiny_tao.match_height(image)
tiny_tao.next_to(image, RIGHT, LARGE_BUFF)
quote = self.get_quote()
self.play(
FadeInFromDown(image),
Write(name),
)
self.wait()
self.play(
FadeIn(tiny_tao, LEFT)
)
self.wait()
self.play(FadeOut(tiny_tao))
#
self.play(
FadeIn(
quote,
lag_ratio=0.05,
run_time=5,
rate_func=bezier([0, 0, 1, 1])
)
)
self.wait()
story_line = Line()
story_line.match_width(quote.story_part)
story_line.next_to(quote.story_part, DOWN, buff=0)
        story_line.set_color(TEAL)
self.play(
quote.story_part.set_color, TEAL,
ShowCreation(story_line),
lag_ratio=0.2,
)
self.wait()
def get_quote(self):
story_words = "fables, stories, and anecdotes"
quote = TexText(
"""
\\Large
``Mathematical problems, or puzzles, are important to real mathematics
(like solving real-life problems), just as fables, stories, and anecdotes
are important to the young in understanding real life.''\\\\
""",
alignment="",
arg_separator=" ",
isolate=[story_words]
)
quote.story_part = quote.get_part_by_tex(story_words)
quote.set_width(FRAME_WIDTH - 2.5)
quote.to_edge(UP)
return quote
class WindmillFairyTale(Scene):
def construct(self):
paths = SVGMobject(file_name="windmill_fairytale")
paths.set_height(FRAME_HEIGHT - 1)
paths.set_stroke(width=0)
paths.set_fill([GREY_B, WHITE])
for path in paths:
path.reverse_points()
self.play(Write(paths[0], run_time=3))
self.wait()
self.play(
LaggedStart(
FadeIn(paths[1], RIGHT),
FadeIn(paths[2], RIGHT),
lag_ratio=0.2,
run_time=3,
)
)
class SolveAProblemOneDay(SpiritOfIMO, PiCreatureScene):
def construct(self):
randy = self.pi_creature
light_bulb = Lightbulb()
light_bulb.base = light_bulb[:3]
light_bulb.light = light_bulb[3:]
light_bulb.set_height(1)
light_bulb.next_to(randy, UP, MED_LARGE_BUFF)
light = self.get_light(light_bulb.get_center())
bubble = ThoughtBubble()
bubble.pin_to(randy)
you = TexText("You")
you.scale(1.5)
arrow = Vector(LEFT)
arrow.next_to(randy, RIGHT)
you.next_to(arrow)
self.play(
ShowCreation(bubble),
randy.change, "pondering",
)
self.play(
FadeIn(you, LEFT),
GrowArrow(arrow)
)
self.wait(2)
self.play(
FadeInFromDown(light_bulb),
randy.change, "hooray",
)
self.play(
LaggedStartMap(
VFadeInThenOut, light,
run_time=2
),
randy.change, "thinking", light,
)
self.wait(2)
class QuixoteReference(Scene):
def construct(self):
rect = FullScreenFadeRectangle()
rect.set_fill([GREY_D, GREY])
windmill = SVGMobject("windmill")
windmill.set_fill([GREY_BROWN, WHITE], 1)
windmill.set_stroke(width=0)
windmill.set_height(6)
windmill.to_edge(RIGHT)
# windmill.to_edge(DOWN, buff=0)
quixote = SVGMobject("quixote")
quixote.flip()
quixote.set_height(4)
quixote.to_edge(LEFT)
quixote.set_stroke(BLACK, width=0)
quixote.set_fill(BLACK, 1)
quixote.align_to(windmill, DOWN)
self.add(rect)
# self.add(windmill)
self.play(LaggedStart(
DrawBorderThenFill(windmill),
DrawBorderThenFill(
quixote,
stroke_width=1,
),
lag_ratio=0.4,
run_time=3
))
self.wait()
class WindmillEndScreen(PatreonEndScreen):
CONFIG = {
"specific_patrons": [
"Juan Benet",
"Kurt Dicus",
"Vassili Philippov",
"Davie Willimoto",
"Burt Humburg",
"Hardik Meisheri",
"L. Z.",
"Matt Russell",
"Scott Gray",
"soekul",
"Tihan Seale",
"D. Sivakumar",
"Richard Barthel",
"Ali Yahya",
"Arthur Zey",
"dave nicponski",
"Joseph Kelly",
"Kaustuv DeBiswas",
"kkm",
"Lambda AI Hardware",
"Lukas Biewald",
"Mark Heising",
"Nicholas Cahill",
"Peter Mcinerney",
"Quantopian",
"Roy Larson",
"Scott Walter, Ph.D.",
"Tauba Auerbach",
"Yana Chernobilsky",
"Yu Jun",
"Jordan Scales",
"Lukas -krtek.net- Novy",
"Britt Selvitelle",
"David Gow",
"J",
"Jonathan Wilson",
"Joseph John Cox",
"Magnus Dahlström",
"Randy C. Will",
"Ryan Atallah",
"Luc Ritchie",
"1stViewMaths",
"Adrian Robinson",
"Aidan Shenkman",
"Alex Mijalis",
"Alexis Olson",
"Andreas Benjamin Brössel",
"Andrew Busey",
"Ankalagon",
"Antoine Bruguier",
"Antonio Juarez",
"Arjun Chakroborty",
"Art Ianuzzi",
"Austin Goodman",
"Awoo",
"Ayan Doss",
"AZsorcerer",
"Barry Fam",
"Bernd Sing",
"Boris Veselinovich",
"Bradley Pirtle",
"Brian Staroselsky",
"Charles Southerland",
"Charlie N",
"Chris Connett",
"Christian Kaiser",
"Clark Gaebel",
"Cooper Jones",
"Danger Dai",
"Daniel Pang",
"Dave B",
"Dave Kester",
"David B. Hill",
"David Clark",
"DeathByShrimp",
"Delton Ding",
"Dheeraj Vepakomma",
"eaglle",
"Empirasign",
"emptymachine",
"Eric Younge",
"Ero Carrera",
"Eryq Ouithaqueue",
"Federico Lebron",
"Fernando Via Canel",
"Gero Bone-Winkel",
"Giovanni Filippi",
"Hal Hildebrand",
"Hitoshi Yamauchi",
"Isaac Jeffrey Lee",
"Ivan Sorokin",
"j eduardo perez",
"Jacob Harmon",
"Jacob Hartmann",
"Jacob Magnuson",
"Jameel Syed",
"Jason Hise",
"Jeff Linse",
"Jeff Straathof",
"John C. Vesey",
"John Griffith",
"John Haley",
"John V Wertheim",
"Jonathan Eppele",
"Jordan A Purcell",
"Josh Kinnear",
"Joshua Claeys",
"Kai-Siang Ang",
"Kanan Gill",
"Kartik Cating-Subramanian",
"L0j1k",
"Lee Redden",
"Linh Tran",
"Ludwig Schubert",
"Magister Mugit",
"Mark B Bahu",
"Martin Price",
"Mathias Jansson",
"Matt Langford",
"Matt Roveto",
"Matthew Bouchard",
"Matthew Cocke",
"Michael Faust",
"Michael Hardel",
"Mirik Gogri",
"Mustafa Mahdi",
"Márton Vaitkus",
"Nero Li",
"Nikita Lesnikov",
"Omar Zrien",
"Owen Campbell-Moore",
"Patrick Lucas",
"Peter Ehrnstrom",
"RedAgent14",
"rehmi post",
"Ripta Pasay",
"Rish Kundalia",
"Roman Sergeychik",
"Roobie",
"Ryan Williams",
"Sebastian Garcia",
"Solara570",
"Steven Siddals",
"Stevie Metke",
"Tal Einav",
"Ted Suzman",
"Thomas Tarler",
"Tianyu Ge",
"Tom Fleming",
"Tyler VanValkenburg",
"Valeriy Skobelev",
"Vinicius Reis",
"Xuanji Li",
"Yavor Ivanov",
"YinYangBalance.Asia",
"Zach Cardwell",
],
}
class Thumbnail(WindmillScene):
CONFIG = {
"dot_config": {
"radius": 0.15,
"stroke_width": 1,
},
"random_seed": 7,
"animate": False,
}
def construct(self):
points = self.get_random_point_set(11)
points[:, 0] *= 1.7
points += 0.5 * LEFT
points[1] = ORIGIN
points[10] += LEFT
points[6] += 3 * RIGHT
windmill = self.get_windmill(
points, points[1],
angle=45 * DEGREES,
)
dots = self.get_dots(points)
# rects = self.get_left_right_colorings(windmill)
pivot_dot = self.get_pivot_dot(windmill)
pivot_dot.scale(2)
pivot_dot.set_color(WHITE)
new_pivot = points[5]
new_pivot2 = points[3]
flash = Flash(pivot_dot, flash_radius=0.5)
wa = windmill.get_angle()
arcs = VGroup(*[
Arc(
start_angle=wa + a,
angle=90 * DEGREES,
radius=1.5,
stroke_width=10,
).add_tip(tip_length=0.7)
for a in [0, PI]
])
arcs.move_to(windmill.pivot)
arcs.set_color([GREY_B, WHITE])
polygon1 = Polygon(
(FRAME_HEIGHT * UP + FRAME_WIDTH * LEFT) / 2,
(FRAME_HEIGHT * UP + FRAME_HEIGHT * RIGHT) / 2,
(FRAME_HEIGHT * DOWN + FRAME_HEIGHT * LEFT) / 2,
(FRAME_HEIGHT * DOWN + FRAME_WIDTH * LEFT) / 2,
)
polygon1.set_color([BLUE, GREY_E])
polygon1.set_fill(opacity=0.5)
polygon2 = Polygon(
(FRAME_HEIGHT * UP + FRAME_WIDTH * RIGHT) / 2,
(FRAME_HEIGHT * UP + FRAME_HEIGHT * RIGHT) / 2,
(FRAME_HEIGHT * DOWN + FRAME_HEIGHT * LEFT) / 2,
(FRAME_HEIGHT * DOWN + FRAME_WIDTH * RIGHT) / 2,
)
polygon2.set_sheen_direction(DR)
polygon2.set_color([GREY_BROWN, BLACK])
polygon2.set_fill(opacity=1)
self.add(polygon1, polygon2)
# self.add(rects[0])
self.add(windmill, dots, pivot_dot)
self.add(arcs)
self.add(flash.mobject)
self.add_dot_color_updater(dots, windmill, color2=WHITE)
words = TexText("Next\\\\", "pivot")
words2 = TexText("Next\\\\", "next\\\\", "pivot", alignment="")
words.scale(2)
words2.scale(2)
# words.next_to(windmill.pivot, RIGHT)
words.to_edge(UR)
words2.to_corner(DL)
arrow = Arrow(words[1].get_left(), new_pivot, buff=0.6)
arrow.set_stroke(width=10)
arrow.set_color(YELLOW)
arrow2 = Arrow(words2[-1].get_right(), new_pivot2, buff=0.6)
arrow2.match_style(arrow)
arrow.rotate(
arrow2.get_angle() + PI - arrow.get_angle(),
about_point=new_pivot,
)
self.add(words, arrow)
self.add(words2, arrow2)
# for i, dot in enumerate(dots):
# self.add(Integer(i).move_to(dot))
if self.animate:
sorted_dots = VGroup(*dots)
sorted_dots.sort(lambda p: np.dot(p, DR))
self.play(
polygon1.shift, FRAME_WIDTH * LEFT,
polygon2.shift, FRAME_WIDTH * RIGHT,
LaggedStart(*[
ApplyMethod(mob.scale, 0)
for mob in [sorted_dots[6], *flash.mobject, windmill, pivot_dot]
]),
LaggedStart(*[
ApplyMethod(dot.to_edge, LEFT, {"buff": -1})
for dot in sorted_dots[:6]
]),
LaggedStart(*[
ApplyMethod(dot.to_edge, RIGHT, {"buff": -1})
for dot in sorted_dots[7:]
]),
LaggedStart(*[
FadeOut(word, RIGHT)
for word in words
]),
LaggedStart(*[
FadeOut(word, LEFT)
for word in words2
]),
LaggedStartMap(
Uncreate,
VGroup(arrow, arrow2, *arcs),
),
run_time=3,
)
class ThumbnailAnimated(Thumbnail):
CONFIG = {
"animate": True,
}
class Thumbnail2(Scene):
def construct(self):
words = TexText("Olympics\\\\", "for\\\\", "math", alignment="")
# words.arrange(DOWN, aligned_edge=LEFT)
words.set_height(FRAME_HEIGHT - 1.5)
words.to_edge(LEFT)
logo = ImageMobject("imo_logo")
logo.set_height(4.5)
logo.to_corner(DR, buff=LARGE_BUFF)
rect = FullScreenFadeRectangle()
rect.set_fill([GREY, BLACK], 1)
self.clear()
self.add(rect)
self.add(words)
self.add(logo)
| [
"[email protected]"
] | |
1fa19d44a1c11b59e3a25c948ed5ac15c23cdb30 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_11/EBS-SVN/trunk/purchase_custom/__manifest__.py | 90748a083184db83669186389d9fa5f1e7757874 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | # -*- coding: utf-8 -*-
{
'name': "purchase_custom",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base','base_custom','purchase','purchase_requisition','hr','account'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/views.xml',
'views/templates.xml',
'security/purchase_security.xml',
'wizard/purchase_order_wizard_view.xml',
'report/action_purchase_order_report.xml',
'report/template_purchase_order_report.xml',
'report/purchase_quotation_templates_inherit.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
| [
"[email protected]"
] | |
b7ac2271f415f595aa5380f77be150c49345beab | f0e25779a563c2d570cbc22687c614565501130a | /Think_Python/ackermann.py | 58574bbcd3d3e680558b07a0a04c15c6a2349f44 | [] | no_license | XyK0907/for_work | 8dcae9026f6f25708c14531a83a6593c77b38296 | 85f71621c54f6b0029f3a2746f022f89dd7419d9 | refs/heads/master | 2023-04-25T04:18:44.615982 | 2021-05-15T12:10:26 | 2021-05-15T12:10:26 | 293,845,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
def ackermann(m, n):
"""Computes the Ackermann function A(m, n)
See http://en.wikipedia.org/wiki/Ackermann_function
n, m: non-negative integers
"""
if m == 0:
return n+1
if n == 0:
return ackermann(m-1, 1)
return ackermann(m-1, ackermann(m, n-1))
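# --- Illustrative addition, not part of the original Think Python file ---
# Memoizing lets modestly larger arguments finish; the recursion depth still
# explodes for larger m and n, so this is a sketch rather than a general fix.
from functools import lru_cache
@lru_cache(maxsize=None)
def ackermann_memo(m, n):
    """Memoized variant of ackermann()."""
    if m == 0:
        return n + 1
    if n == 0:
        return ackermann_memo(m - 1, 1)
    return ackermann_memo(m - 1, ackermann_memo(m, n - 1))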
print(ackermann(3, 4))  # 125; the original call ackermann(100, 101) blows past any practical recursion limit | [
"[email protected]"
] | |
bdb9812cf2874f454a6ae0a548efa9589981824c | 3528abad46b15133b2108c237f926a1ab252cbd5 | /Core/_Axiom/Transport.py | bc2b7d2b4e8787c324fb18ec12fce7581ef3879f | [] | no_license | scottmudge/MPK261_Ableton | 20f08234f4eab5ba44fde6e5e745752deb968df2 | c2e316b8347367bd157276f143b9f1a9bc2fe92c | refs/heads/master | 2020-03-20T10:56:32.421561 | 2018-06-14T19:12:47 | 2018-06-14T19:12:47 | 137,389,086 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,693 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/_Axiom/Transport.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
import Live
from .consts import *
class Transport:
u""" Class representing the transport section on the Axiom controllers """
def __init__(self, parent):
self.__parent = parent
self.__ffwd_held = False
self.__rwd_held = False
self.__delay_counter = 0
def build_midi_map(self, script_handle, midi_map_handle):
for cc_no in AXIOM_TRANSPORT:
Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, 15, cc_no)
def receive_midi_cc(self, cc_no, cc_value):
if cc_no == AXIOM_STOP:
if cc_value > 0:
self.__parent.song().is_playing = False
else:
if cc_no == AXIOM_PLAY:
if cc_value > 0:
self.__parent.song().is_playing = True
else:
if cc_no == AXIOM_REC:
if cc_value > 0:
self.__parent.song().record_mode = not self.__parent.song().record_mode
else:
if self.__parent.application().view.is_view_visible('Session'):
if cc_value > 0:
self.__cc_in_session(cc_no)
else:
self.__cc_in_arranger(cc_no, cc_value)
def __cc_in_session(self, cc_no):
index = list(self.__parent.song().scenes).index(self.__parent.song().view.selected_scene)
if cc_no == AXIOM_LOOP:
self.__parent.song().view.selected_scene.fire_as_selected()
else:
if cc_no == AXIOM_RWD:
if index > 0:
index = index - 1
self.__parent.song().view.selected_scene = self.__parent.song().scenes[index]
else:
if cc_no == AXIOM_FFWD:
if index < len(self.__parent.song().scenes) - 1:
index = index + 1
self.__parent.song().view.selected_scene = self.__parent.song().scenes[index]
def __cc_in_arranger(self, cc_no, cc_value):
if cc_no == AXIOM_LOOP:
if cc_value > 0:
self.__parent.song().loop = not self.__parent.song().loop
else:
if cc_no == AXIOM_RWD:
if not self.__ffwd_held:
if cc_value > 0:
self.__rwd_held = True
self.__delay_counter = 0
self.__parent.song().jump_by(-1 * self.__parent.song().signature_denominator)
else:
self.__rwd_held = False
else:
if cc_no == AXIOM_FFWD:
if not self.__rwd_held:
if cc_value > 0:
self.__ffwd_held = True
self.__delay_counter = 0
self.__parent.song().jump_by(self.__parent.song().signature_denominator)
else:
self.__ffwd_held = False
def refresh_state(self):
if self.__ffwd_held:
self.__delay_counter += 1
if self.__delay_counter % 5 == 0:
self.__parent.song().jump_by(self.__parent.song().signature_denominator)
if self.__rwd_held:
self.__delay_counter += 1
if self.__delay_counter % 5 == 0:
self.__parent.song().jump_by(-1 * self.__parent.song().signature_denominator)
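# --- Illustrative usage sketch (not part of the decompiled file). ---
# Live and the AXIOM_* CC numbers exist only inside Ableton, so the stand-ins
# below are hypothetical; they just show how receive_midi_cc dispatches.
# class _StubSong(object):
#     is_playing = False
#     record_mode = False
# class _StubParent(object):
#     _song = _StubSong()
#     def song(self):
#         return self._song
# transport = Transport(_StubParent())
# transport.receive_midi_cc(AXIOM_PLAY, 127)  # would set song().is_playing = True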
| [
"[email protected]"
] | |
d9e0bc511e4e2824de47b2ed8a38c4a528b2ad2b | 1333a965058e926649652ea55154bd73b6f05edd | /4_advanced/ai-py-param-validation/src/paramvalidator/exceptions/validation_exception.py | 594afb62d07c15d96b2052c9d63a8accbf4eb5fb | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | grecoe/teals | 42ebf114388b9f3f1580a41d5d03da39eb083082 | ea00bab4e90d3f71e3ec2d202ce596abcf006f37 | refs/heads/main | 2021-06-21T20:12:03.108427 | 2021-05-10T19:34:40 | 2021-05-10T19:34:40 | 223,172,099 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | """
(c) Microsoft. All rights reserved.
"""
class ParameterValidationException(Exception):
"""
Base exception for parameter validation
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
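# --- Illustrative usage sketch (not part of the original module) ---
# The message text is an example value.
if __name__ == "__main__":
    try:
        raise ParameterValidationException("parameter 'threshold' is required")
    except ParameterValidationException as error:
        print(error)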
| [
"[email protected]"
] | |
a7be7d0c99595985e6a9bcda3ec4af33a03ae376 | 18057e01c81dc792a73a2e0bd1a4e037de8fefcb | /kaohantei/ninsiki.py | 606ec732585a18eb525347fe8015002c5d6950de | [] | no_license | kentahoriuchi/Kenta | 15e80018f5c14e1409ac13a7a52c4f64acdce938 | 97bb657a37f0d89525b04f9157a223b47664793e | refs/heads/master | 2020-03-22T14:21:30.221093 | 2018-07-08T14:22:15 | 2018-07-08T14:22:15 | 140,173,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from keras.models import load_model
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adagrad
from keras.optimizers import Adam
import numpy as np
import cv2
import sys
# The image path is taken from the command line.
img = cv2.imread(sys.argv[1])
filepath = sys.argv[1]
cascade_path = "haarcascade_frontalface_default.xml"
cascade = cv2.CascadeClassifier(cascade_path)
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
facerect = cascade.detectMultiScale(image_gray, scaleFactor=1.1, minNeighbors=10, minSize=(30, 30))
for rect in facerect:
x = rect[0]
y = rect[1]
width = rect[2]
height = rect[3]
dst = img[y:y+height, x:x+width]
image = cv2.resize(dst, (100, 100))
image = image.transpose(2, 0, 1)
model = load_model('gazou.h5')
opt = Adam(0.0001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
result = model.predict_classes(np.array([image/255.]))
if result[0] == 0:
    print("He may be man 1")
elif result[0] == 1:
    print("He may be man 2")
| [
"[email protected]"
] | |
b7721870d8d7c53ef25f4eb6c25ca932b7aa76e7 | 10d8fab4b21d55cfef0139c04a7f70881f5196f4 | /Stack/simplify-directory-path-unix-like.py | db6af1a41488710662509a134bcd9f11f7e8172a | [] | no_license | wilfredarin/geeksforgeeks | a2afcfd2c64be682b836019407e557332d629ab8 | 5e27cb6706e0ae507694c2170fa00370f219c3e6 | refs/heads/master | 2021-08-07T05:48:39.426686 | 2020-08-19T07:25:19 | 2020-08-19T07:25:19 | 212,023,179 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | """Simplify the directory path (Unix like)
Given an absolute path for a file (Unix-style), simplify it. Note that an absolute path always begins with '/' (the root directory), a dot in the path represents the current directory, and a double dot represents the parent directory.
Examples:
"/a/./" --> means stay at the current directory 'a'
"/a/b/.." --> means jump to the parent directory
from 'b' to 'a'
"////" --> consecutive multiple '/' are a valid
path, they are equivalent to single "/".
Input : /home/
Output : /home
Input : /a/./b/../../c/
Output : /c
Input : /a/..
Output : /
Input : /a/../
Output : /
Input : /../../../../../a
Output : /a
Input : /a/./b/./c/./d/
Output : /a/b/c/d
Input : /a/../.././../../.
Output : /
Input : /a//b//c//////d
Output : /a/b/c/d
By looking at the examples we can see that the simplification process behaves just like a stack.
Whenever we encounter a directory name, we push it onto the stack. When we come across "." we do nothing.
When we find ".." in the path, we pop the topmost element, since we have to jump back to the parent directory.
When we see multiple "////" we simply ignore them, as they are equivalent to a single "/".
After iterating through the whole string, the elements remaining in the stack form the simplified absolute path.
Finally the stack contents are read back in order (here via reverse-and-pop) to build the result string.
"""
def simplifyPath(A):
    stack = []
    res = "/"
n = len(A)
i = 0
while i<n:
        # new command
dir_str = ""
while i<n and A[i]=="/":
i+=1
while i<n and A[i]!="/":
dir_str+=A[i]
i+=1
if dir_str==".." :
if stack:
stack.pop()
elif dir_str==".":
continue
elif dir_str:
stack.append(dir_str)
i+=1
stack.reverse()
while stack:
temp = stack[-1]
if len(stack)!=1:
res+=temp+"/"
else:
res+=temp
stack.pop()
return res
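# --- Illustrative self-check (not part of the original snippet) ---
# Runs the docstring examples through the function above.
if __name__ == "__main__":
    for p in ("/home/", "/a/./b/../../c/", "/a/..", "/../../../../../a", "/a//b//c//////d"):
        print(p, "->", simplifyPath(p))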
| [
"[email protected]"
] | |
5dbea0ee8b5ef1ca38d84fa1aaf715f0c794feb4 | d89581e043a154a56de69f419f9e7c2f67cf4ff2 | /Apps/Engines/Nuke/NukeTools_1.01/Python/minorVersionUp.py | 214d1a6323b578537a59a33a18fc91be3ffd8409 | [
"MIT"
] | permissive | yazici/CyclopsVFX-Unity | 38b121333d5a5a610cf58489822c6f20f825be11 | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | refs/heads/master | 2020-04-29T15:05:04.942646 | 2017-11-21T17:16:45 | 2017-11-21T17:16:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | #The MIT License (MIT)
#
#Copyright (c) 2015 Geoffroy Givry
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import nuke
import os
geoffPath = os.getenv("SHOW_PATH")
task = os.getenv('TASK')
def minorVersionUp():
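    # Expects the script name to look like <base>v<version>_<take>.nk and
    # bumps the trailing take ("minor version") number by one before saving.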
if not nuke.root()['name'].value() == "":
version = int(nuke.root()['name'].value().split('/')[-1].split('v')[1].split('_')[0])
take = int(nuke.root()['name'].value().split('/')[-1].split('v')[1].split('_')[1].split('.')[0])
shot = os.environ['SHOT']
job = os.environ['JOB']
baseFile = nuke.root()['name'].value().split('/')[-1].split('v')[0]
baseFile0 = '%s/%s/%s/TASKS/%s/Work/Nuke/' % (geoffPath, job, shot, task)
takeUp = take + 1
newFile = baseFile0 + '%sv%03d_%02d.nk' % (baseFile, version, takeUp)
nuke.scriptSaveAs(newFile,0)
else:
nuke.message('please choose a script to save first')
| [
"[email protected]"
] | |
975050352947450358340060f69ced694a7463e3 | 143eb3ced0ff1f9cad745c620fcb572f72d66048 | /Assignment4/atom3/Kernel/GenericGraph/models/test_GenericGraph_mdl.py | fd96c32e3307d5950cd3b3ee0c3afc1ddfd9c6c7 | [] | no_license | pombreda/comp304 | 2c283c60ffd7810a1d50b69cab1d5c338563376d | d900f58f0ddc1891831b298d9b37fbe98193719d | refs/heads/master | 2020-12-11T07:26:19.594752 | 2014-11-07T12:29:28 | 2014-11-07T12:29:28 | 35,264,549 | 1 | 1 | null | 2015-05-08T07:18:18 | 2015-05-08T07:18:18 | null | UTF-8 | Python | false | false | 1,825 | py | from graph_ASG_ERmetaMetaModel import *
from stickylink import *
from widthXfillXdecoration import *
from ASG_GenericGraph import *
from ASG_GenericGraph import *
from GenericGraphNode import *
from GenericGraphEdge import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def test_GenericGraph_mdl(self, rootNode):
self.globalPrecondition( rootNode )
self.obj52=GenericGraphNode(self)
self.obj52.graphClass_= graph_GenericGraphNode
if self.genGraphics:
from graph_GenericGraphNode import *
new_obj = graph_GenericGraphNode(182.0,111.0,self.obj52)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("GenericGraphNode", new_obj.tag)
else: new_obj = None
self.obj52.graphObject_ = new_obj
rootNode.addNode(self.obj52)
self.globalAndLocalPostcondition(self.obj52, rootNode)
self.globalPrecondition( rootNode )
self.obj56=GenericGraphEdge(self)
self.obj56.graphClass_= graph_GenericGraphEdge
if self.genGraphics:
from graph_GenericGraphEdge import *
new_obj = graph_GenericGraphEdge(249.0,218.0,self.obj56)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("GenericGraphEdge", new_obj.tag)
else: new_obj = None
self.obj56.graphObject_ = new_obj
rootNode.addNode(self.obj56)
self.globalAndLocalPostcondition(self.obj56, rootNode)
self.drawConnections( )
newfunction = test_GenericGraph_mdl
loadedMMName = 'GenericGraph'
| [
"[email protected]"
] | |
65b3e12a7a4232da82a51c7d4fddf642b3b3700e | 2df82b931c89ac70d49b0716d642d8e355926d50 | /product/urls.py | 87647f6494c6266c870dd2feb79bc260185026f8 | [] | no_license | khanansha/producthunt | 1a638104e83803b9afc4a51ff3ead438ae47cab6 | 03b8d45091c88a2ff142f0a3082910ac1fa0ba41 | refs/heads/master | 2021-05-26T03:21:35.246011 | 2020-04-08T08:41:17 | 2020-04-08T08:41:17 | 254,031,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | from django.urls import path, include
from . import views
urlpatterns = [
path('create', views.create, name='create'),
path('<int:product_id>', views.detail, name='detail'),
path('<int:product_id>/upvote', views.upvote, name='upvote'),
]
| [
"[email protected]"
] | |
b98dfa1240d8bb0330bd47a98fbbe848f5c6744d | e9ef3cd143478660d098668a10e67544a42b5878 | /Lib/corpuscrawler/crawl_kjh.py | ed7f0805140a95e20c6d09212308da8737fd583b | [
"Apache-2.0"
] | permissive | google/corpuscrawler | a5c790c19b26e6397b768ce26cf12bbcb641eb90 | 10adaecf4ed5a7d0557c8e692c186023746eb001 | refs/heads/master | 2023-08-26T04:15:59.036883 | 2022-04-20T08:18:11 | 2022-04-20T08:18:11 | 102,909,145 | 119 | 40 | NOASSERTION | 2022-04-20T08:18:12 | 2017-09-08T22:21:03 | Python | UTF-8 | Python | false | false | 809 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='kjh')
crawl_bibleis(crawler, out, bible='KJHIBT')
| [
"[email protected]"
] | |
7f33ebf6e4c0e218d49274dab77575fbad6f4e72 | b3638a57ff986c9af7281f057bd4cb5641c11589 | /백준/210803_최단경로/11404플로이드.py | e3904cdf037eb2b00770b01adef797ef64f2da63 | [] | no_license | commGom/pythonStudy | 6adc01faddbe3ef88e0cbab9da174caa77857ff7 | a5d52e66dfd0b3b7538454ca2b6fcd9665f83e6c | refs/heads/main | 2023-08-25T12:21:27.670495 | 2021-10-04T08:19:42 | 2021-10-04T08:19:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | # 5
# 14
# 1 2 2
# 1 3 3
# 1 4 1
# 1 5 10
# 2 4 2
# 3 4 1
# 3 5 1
# 4 5 3
# 3 5 10
# 3 1 8
# 1 4 2
# 5 1 7
# 3 4 2
# 5 2 4
import sys
input=sys.stdin.readline
# Number of cities N, number of buses M
N=int(input())
M=int(input())
# graph[start][arrive] keeps the cheapest bus fare seen between the two cities
# Read M lines of: departure city, arrival city, cost
graph=[[0 for col in range(N+1)] for row in range(N+1)]
for _ in range(M):
start,arrive,cost=map(int,input().split())
if graph[start][arrive]==0 or graph[start][arrive]>cost:
graph[start][arrive]=cost
# print(graph)
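# Floyd-Warshall: after trying every intermediate city k, graph[i][j] holds
# the minimum fare from i to j; 0 means unreachable (or i == j). Runs in O(N^3).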
for k in range(1,N+1):
for i in range(1,N+1):
for j in range(1,N+1):
if i==j or i==k or j==k:continue
if graph[i][k]>0 and graph[k][j]>0:
if graph[i][j]==0:
graph[i][j]=graph[i][k]+graph[k][j]
else:
graph[i][j]=min(graph[i][j],graph[i][k]+graph[k][j])
# print(graph)
for i in range(1,len(graph)):
for j in range(1,len(graph[i])):
print(graph[i][j],end=" ")
print() | [
"[email protected]"
] | |
07632b55cea18f87762376f2c17c30d0fd1d32bc | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/compute/v1/network_attachment_iam_policy.py | a5af364be5feb5a7850e9d4462d7156faf070069 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 22,498 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkAttachmentIamPolicyArgs', 'NetworkAttachmentIamPolicy']
@pulumi.input_type
class NetworkAttachmentIamPolicyArgs:
def __init__(__self__, *,
region: pulumi.Input[str],
resource: pulumi.Input[str],
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]] = None,
etag: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['RuleArgs']]]] = None,
version: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a NetworkAttachmentIamPolicy resource.
:param pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]] audit_configs: Specifies cloud audit logging configuration for this policy.
:param pulumi.Input[Sequence[pulumi.Input['BindingArgs']]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[Sequence[pulumi.Input['RuleArgs']]] rules: This is deprecated and has no effect. Do not use.
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
pulumi.set(__self__, "region", region)
pulumi.set(__self__, "resource", resource)
if audit_configs is not None:
pulumi.set(__self__, "audit_configs", audit_configs)
if bindings is not None:
pulumi.set(__self__, "bindings", bindings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if project is not None:
pulumi.set(__self__, "project", project)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def region(self) -> pulumi.Input[str]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: pulumi.Input[str]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="auditConfigs")
def audit_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]]]:
"""
Specifies cloud audit logging configuration for this policy.
"""
return pulumi.get(self, "audit_configs")
@audit_configs.setter
def audit_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]]]):
pulumi.set(self, "audit_configs", value)
@property
@pulumi.getter
def bindings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@bindings.setter
def bindings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]]):
pulumi.set(self, "bindings", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleArgs']]]]:
"""
This is deprecated and has no effect. Do not use.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "version", value)
class NetworkAttachmentIamPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AuditConfigArgs']]]]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
resource: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleArgs']]]]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AuditConfigArgs']]]] audit_configs: Specifies cloud audit logging configuration for this policy.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleArgs']]]] rules: This is deprecated and has no effect. Do not use.
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NetworkAttachmentIamPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param NetworkAttachmentIamPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NetworkAttachmentIamPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AuditConfigArgs']]]]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
resource: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleArgs']]]]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NetworkAttachmentIamPolicyArgs.__new__(NetworkAttachmentIamPolicyArgs)
__props__.__dict__["audit_configs"] = audit_configs
__props__.__dict__["bindings"] = bindings
__props__.__dict__["etag"] = etag
__props__.__dict__["project"] = project
if region is None and not opts.urn:
raise TypeError("Missing required property 'region'")
__props__.__dict__["region"] = region
if resource is None and not opts.urn:
raise TypeError("Missing required property 'resource'")
__props__.__dict__["resource"] = resource
__props__.__dict__["rules"] = rules
__props__.__dict__["version"] = version
replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["project", "region", "resource"])
opts = pulumi.ResourceOptions.merge(opts, replace_on_changes)
super(NetworkAttachmentIamPolicy, __self__).__init__(
'google-native:compute/v1:NetworkAttachmentIamPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkAttachmentIamPolicy':
"""
Get an existing NetworkAttachmentIamPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NetworkAttachmentIamPolicyArgs.__new__(NetworkAttachmentIamPolicyArgs)
__props__.__dict__["audit_configs"] = None
__props__.__dict__["bindings"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["project"] = None
__props__.__dict__["region"] = None
__props__.__dict__["resource"] = None
__props__.__dict__["rules"] = None
__props__.__dict__["version"] = None
return NetworkAttachmentIamPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="auditConfigs")
def audit_configs(self) -> pulumi.Output[Sequence['outputs.AuditConfigResponse']]:
"""
Specifies cloud audit logging configuration for this policy.
"""
return pulumi.get(self, "audit_configs")
@property
@pulumi.getter
def bindings(self) -> pulumi.Output[Sequence['outputs.BindingResponse']]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
return pulumi.get(self, "region")
@property
@pulumi.getter
def resource(self) -> pulumi.Output[str]:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[Sequence['outputs.RuleResponse']]:
"""
This is deprecated and has no effect. Do not use.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter
def version(self) -> pulumi.Output[int]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
| [
"[email protected]"
] | |
aeeee04864a4e9f90e0b78751f06a0c1734023fe | ffca2ab12cb1dad9e3ddd6bf0f615cef91db62e5 | /test.py | cf87ab1380fb1dd2c650be87a67e7c934d453c5d | [
"MIT"
] | permissive | CyborgVillager/Block-Tower-Defense | 0ee26678bb00951b1168f5bc20c762c04cf8a648 | 287da85c852e8596de9e57827845c6d7db286ec9 | refs/heads/master | 2020-12-02T02:22:29.347524 | 2019-12-30T10:54:27 | 2019-12-30T10:54:27 | 230,857,127 | 0 | 0 | MIT | 2019-12-30T06:00:15 | 2019-12-30T06:00:14 | null | UTF-8 | Python | false | false | 1,138 | py | import pygame
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("Hello, world!")
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((100, 60, 25))
clock = pygame.time.Clock()
keepGoing = True
color = (100, 100, 100)
size = (150, 50)
pos = (50, 50)
# Set up main loop
while keepGoing:
# Timer to set frame rate
clock.tick(30)
touch = pygame.mouse.get_pos()
bar = pygame.Surface(size)
bar = bar.convert()
    bar.fill(color)
for event in pygame.event.get():
if event.type == pygame.QUIT:
keepGoing = False
        if 50 <= touch[0] <= 200 and 50 <= touch[1] <= 100:  # cursor inside the bar's resting rectangle
if event.type == pygame.MOUSEBUTTONDOWN:
color = (50, 50, 50)
size = (160, 60)
pos = (45, 45)
if event.type == pygame.MOUSEBUTTONUP:
color = (100, 100, 100)
size = (150, 50)
pos = (50, 50)
screen.blit(background, (0, 0))
screen.blit(bar, pos)
pygame.display.flip() | [
"[email protected]"
] | |
d5a5c939def085847ffa6a958f51d3a0dee2867d | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/world/ZoneLOD.py | 484017d0780e356d8af203a398a3626258d15e5c | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,823 | py | from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.showbase.PythonUtil import *
from otp.otpbase import OTPGlobals
from pirates.piratesbase import PiratesGlobals
class ZoneLOD(DirectObject):
notify = directNotify.newCategory('ZoneLOD')
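    # One CollisionSphere is built per entry in zoneRadii, all centered on
    # zoneCenter.  setCollLevel() keeps only the spheres bounding the current
    # level unstashed, so enter/exit events from a collider (typically the
    # local avatar) drive loadZoneLevel()/unloadZoneLevel() across each radius.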
def __init__(self, uniqueNameFunc, zoneRadii = []):
self.uniqueNameFunc = uniqueNameFunc
self.zoneRadii = zoneRadii
self.zoneSphere = []
self.lastZoneLevel = None
self.numSpheres = 0
self.levelForced = False
self.lodCollideMask = PiratesGlobals.ZoneLODBitmask
self.allEnabled = False
def delete(self):
self.deleteZoneCollisions()
if self.zoneSphere:
del self.zoneSphere
self.ignoreAll()
del self.uniqueNameFunc
def cleanup(self):
if hasattr(self, 'outerSphere') and self.numSpheres:
self.setZoneLevel(self.outerSphere + 1)
def setZoneRadii(self, zoneRadii, zoneCenter=[0, 0]):
self.numSpheres = len(zoneRadii)
self.zoneRadii = zoneRadii
self.zoneCenter = zoneCenter
self.innerSphere = 0
self.outerSphere = self.numSpheres - 1
self.deleteZoneCollisions()
self.initZoneCollisions()
def setLodCollideMask(self, mask):
self.lodCollideMask = mask
for currSphere in self.zoneSphere:
currSphere.node().setIntoCollideMask(self.lodCollideMask)
def getLodCollideMask(self):
return self.lodCollideMask
def initZoneCollisions(self):
for i in xrange(len(self.zoneRadii)):
cSphere = CollisionSphere(0.0, 0.0, 0.0, self.zoneRadii[i])
cSphere.setTangible(0)
cName = self.uniqueNameFunc('zoneLevel' + str(i))
cSphereNode = CollisionNode(cName)
cSphereNode.setIntoCollideMask(self.lodCollideMask)
cSphereNode.addSolid(cSphere)
cRoot = self.find('collisions')
if not cRoot.isEmpty():
cSphereNodePath = cRoot.attachNewNode(cSphereNode)
else:
cSphereNodePath = self.attachNewNode(cSphereNode)
cSphereNodePath.setPos(self.zoneCenter[0], self.zoneCenter[1], 0)
self.zoneSphere.append(cSphereNodePath)
self.setZoneLevel(self.outerSphere + 1)
def deleteZoneCollisions(self):
for c in self.zoneSphere:
c.remove_node()
self.zoneSphere = []
for i in xrange(self.numSpheres):
self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
def showZoneCollisions(self):
for c in self.zoneSphere:
c.show()
def hideZoneCollisions(self):
for c in self.zoneSphere:
c.hide()
def enableAllLODSpheres(self):
for i in xrange(self.numSpheres):
self.accept(self.uniqueNameFunc('exitzoneLevel' + str(i)), Functor(self.handleExitZoneLevel, i + 1))
self.accept(self.uniqueNameFunc('enterzoneLevel' + str(i)), Functor(self.handleEnterZoneLevel, i))
for sphere in self.zoneSphere:
sphere.unstash()
self.allEnabled = True
def disableAllLODSpheres(self):
for i in xrange(self.numSpheres):
self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
for sphere in self.zoneSphere:
sphere.stash()
self.allEnabled = False
def clearAllEnabled(self, resetLastZoneLevel = False):
self.allEnabled = False
if resetLastZoneLevel:
self.setCollLevel(self.lastZoneLevel)
def setCollLevel(self, level):
if self.allEnabled:
return None
for i in xrange(self.numSpheres):
self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
for sphere in self.zoneSphere:
sphere.stash()
if level <= self.outerSphere:
self.zoneSphere[level].unstash()
if level > self.innerSphere:
self.zoneSphere[level - 1].unstash()
if level <= self.outerSphere:
self.accept(self.uniqueNameFunc('exitzoneLevel' + str(level)), Functor(self.handleExitZoneLevel, level + 1))
if level > self.innerSphere:
self.accept(self.uniqueNameFunc('enterzoneLevel' + str(level - 1)), Functor(self.handleEnterZoneLevel, level - 1))
def handleEnterZoneLevel(self, level, entry = None):
if level >= self.lastZoneLevel:
return None
self.setZoneLevel(level, entry)
def handleExitZoneLevel(self, level, entry = None):
if level < self.lastZoneLevel:
return None
self.setZoneLevel(level, entry)
def setZoneLevel(self, level, entry = None):
self.notify.debug('Changing Zone %s:%s' % (self.name, level))
if self.levelForced:
return None
if self.lastZoneLevel == None:
self.loadZoneLevel(level)
elif self.lastZoneLevel > level:
for i in xrange(self.lastZoneLevel - 1, level - 1, -1):
self.loadZoneLevel(i)
self.lastZoneLevel = i
elif self.lastZoneLevel < level:
for i in xrange(self.lastZoneLevel, level):
self.unloadZoneLevel(i)
if i == self.numSpheres:
self.allEnabled = False
self.lastZoneLevel = i
self.setCollLevel(level)
self.lastZoneLevel = level
def setInitialZone(self, pos):
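        # Pick the first zone sphere whose radius contains the starting
        # position; if none does, start just outside the outermost sphere.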
avDist = pos.length()
curLevel = self.outerSphere + 1
for i in xrange(self.numSpheres):
dist = self.zoneRadii[i]
if avDist < dist:
curLevel = i
break
self.setZoneLevel(curLevel)
def setZoneLevelOuter(self):
if self.outerSphere > self.lastZoneLevel:
self.setZoneLevel(self.outerSphere)
def turnOff(self):
for i in xrange(self.numSpheres):
self.ignore(self.uniqueNameFunc('enterzoneLevel' + str(i)))
self.ignore(self.uniqueNameFunc('exitzoneLevel' + str(i)))
for sphere in self.zoneSphere:
sphere.stash()
def turnOn(self):
self.allEnabled = False
if self.lastZoneLevel is not None:
self.setCollLevel(self.lastZoneLevel)
else:
self.setCollLevel(self.outerSphere)
def loadZoneLevel(self, level):
pass
def unloadZoneLevel(self, level):
pass
def forceZoneLevel(self, level):
self.setZoneLevel(level)
self.levelForced = True
def clearForceZoneLevel(self):
self.levelForced = False
self.setZoneLevel(self.outerSphere)
def childLeft(self, myDoId, parentObj, isIsland = True):
if isIsland:
self.builder.left()
for island in parentObj.islands.values():
if island.doId != myDoId:
if isIsland:
island.builder.areaGeometry.unstash()
island.enableAllLODSpheres()
if isIsland:
island.builder.collisions.unstash()
def childArrived(self, myDoId, parentObj, isIsland = True):
if isIsland:
self.builder.arrived()
for island in parentObj.islands.values():
if island.doId != myDoId:
if isIsland:
island.builder.areaGeometry.stash()
island.disableAllLODSpheres()
island.builder.collisions.stash()
else:
island.clearAllEnabled(True)
| [
"[email protected]"
] | |
b6f879be814c5cb7ae7e78b1b76cb8c2557580c5 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_4_neat/16_0_4_kylewilson_d.py | f0a169990ea6d378bafc747d1a45704aacf52a63 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 355 | py | f = open("D-small-attempt0.in", "r")
fo = open("out.txt","w")
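# Code Jam 2016 Qualification, Problem D (small dataset): when S >= K, probing
# positions 1..K of the final pattern suffices; with S < K it is impossible.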
count = int(f.readline())
for case in xrange(0,count):
print case
line = f.readline().split()
    k = int(line[0])
    c = int(line[1])
    s = int(line[2])
    fo.write("Case #" + str(case + 1) + ":")
    if s < k:
        fo.write(" IMPOSSIBLE")
    else:
        for x in range(k):
fo.write(" " + str(x+1))
fo.write("\n") | [
"[[email protected]]"
] | |
d3a1c4a014e244b6ab4565a5e932f0082a4ab67a | b3f58deae474db9035cd340b4e66b3e6bdafdeca | /components/proximity_auth.gypi | 3b4307df79370a117178a4013c1d5396e3ea6421 | [
"BSD-3-Clause"
] | permissive | quanxinglong/chromium | 0c45f232254c056e27f35e00da8472ff3ac49fd9 | 16dda055c8799052d45a593059b614ea682d4f6c | refs/heads/master | 2021-01-24T17:58:29.213579 | 2015-08-10T19:39:07 | 2015-08-10T19:39:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,788 | gypi | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
# GN version: //components/proximity_auth and
# //components/proximity_auth/ble.
'target_name': 'proximity_auth',
'type': 'static_library',
'include_dirs': [
'..',
],
'dependencies': [
':cryptauth',
':cryptauth_proto',
':proximity_auth_logging',
'../base/base.gyp:base',
'../base/base.gyp:base_prefs',
'../device/bluetooth/bluetooth.gyp:device_bluetooth',
'../net/net.gyp:net',
],
'sources': [
"proximity_auth/authenticator.h",
"proximity_auth/ble/bluetooth_low_energy_characteristics_finder.cc",
"proximity_auth/ble/bluetooth_low_energy_characteristics_finder.h",
"proximity_auth/ble/bluetooth_low_energy_connection.cc",
"proximity_auth/ble/bluetooth_low_energy_connection.h",
"proximity_auth/ble/bluetooth_low_energy_connection_finder.cc",
"proximity_auth/ble/bluetooth_low_energy_connection_finder.h",
"proximity_auth/ble/bluetooth_low_energy_device_whitelist.cc",
"proximity_auth/ble/bluetooth_low_energy_device_whitelist.h",
"proximity_auth/ble/remote_attribute.h",
"proximity_auth/ble/fake_wire_message.cc",
"proximity_auth/ble/fake_wire_message.h",
"proximity_auth/ble/pref_names.cc",
"proximity_auth/ble/pref_names.h",
"proximity_auth/ble/proximity_auth_ble_system.cc",
"proximity_auth/ble/proximity_auth_ble_system.h",
"proximity_auth/bluetooth_connection.cc",
"proximity_auth/bluetooth_connection.h",
"proximity_auth/bluetooth_connection_finder.cc",
"proximity_auth/bluetooth_connection_finder.h",
"proximity_auth/bluetooth_throttler.h",
"proximity_auth/bluetooth_throttler_impl.cc",
"proximity_auth/bluetooth_throttler_impl.h",
"proximity_auth/bluetooth_util.cc",
"proximity_auth/bluetooth_util.h",
"proximity_auth/bluetooth_util_chromeos.cc",
"proximity_auth/client.h",
"proximity_auth/client_impl.cc",
"proximity_auth/client_impl.h",
"proximity_auth/client_observer.h",
"proximity_auth/connection.cc",
"proximity_auth/connection.h",
"proximity_auth/connection_finder.h",
"proximity_auth/connection_observer.h",
"proximity_auth/device_to_device_authenticator.cc",
"proximity_auth/device_to_device_authenticator.h",
"proximity_auth/device_to_device_initiator_operations.cc",
"proximity_auth/device_to_device_initiator_operations.h",
"proximity_auth/device_to_device_secure_context.cc",
"proximity_auth/device_to_device_secure_context.h",
"proximity_auth/metrics.cc",
"proximity_auth/metrics.h",
"proximity_auth/proximity_auth_client.h",
"proximity_auth/proximity_auth_system.cc",
"proximity_auth/proximity_auth_system.h",
"proximity_auth/proximity_monitor.h",
"proximity_auth/proximity_monitor_impl.cc",
"proximity_auth/proximity_monitor_impl.h",
"proximity_auth/proximity_monitor_observer.h",
"proximity_auth/remote_device.cc",
"proximity_auth/remote_device.h",
"proximity_auth/remote_status_update.cc",
"proximity_auth/remote_status_update.h",
"proximity_auth/screenlock_bridge.cc",
"proximity_auth/screenlock_bridge.h",
"proximity_auth/screenlock_state.h",
"proximity_auth/secure_context.h",
"proximity_auth/switches.cc",
"proximity_auth/switches.h",
"proximity_auth/throttled_bluetooth_connection_finder.cc",
"proximity_auth/throttled_bluetooth_connection_finder.h",
"proximity_auth/wire_message.cc",
"proximity_auth/wire_message.h",
],
'export_dependent_settings': [
'cryptauth_proto',
],
},
{
'target_name': 'proximity_auth_test_support',
'type': 'static_library',
'include_dirs': [
'..',
],
'dependencies': [
':cryptauth_test_support',
'../base/base.gyp:base',
],
'sources': [
"proximity_auth/device_to_device_responder_operations.cc",
"proximity_auth/device_to_device_responder_operations.h",
],
},
{
# GN version: //components/proximity_auth/logging
'target_name': 'proximity_auth_logging',
'type': 'static_library',
'include_dirs': [
'..',
],
'dependencies': [
'../base/base.gyp:base',
],
'sources': [
"proximity_auth/logging/log_buffer.cc",
"proximity_auth/logging/log_buffer.h",
"proximity_auth/logging/logging.h",
"proximity_auth/logging/logging.cc",
]
},
{
# GN version: //components/proximity_auth/cryptauth/proto
'target_name': 'cryptauth_proto',
'type': 'static_library',
'sources': [
'proximity_auth/cryptauth/proto/cryptauth_api.proto',
'proximity_auth/cryptauth/proto/securemessage.proto',
],
'variables': {
'proto_in_dir': 'proximity_auth/cryptauth/proto',
'proto_out_dir': 'components/proximity_auth/cryptauth/proto',
},
'includes': [ '../build/protoc.gypi' ]
},
{
'target_name': 'cryptauth',
'type': 'static_library',
'include_dirs': [
'..',
],
'dependencies': [
'cryptauth_proto',
'../base/base.gyp:base',
'../crypto/crypto.gyp:crypto',
'../components/components.gyp:gcm_driver',
'../google_apis/google_apis.gyp:google_apis',
'../net/net.gyp:net',
],
'sources': [
"proximity_auth/cryptauth/base64url.cc",
"proximity_auth/cryptauth/base64url.h",
"proximity_auth/cryptauth/cryptauth_access_token_fetcher.h",
"proximity_auth/cryptauth/cryptauth_access_token_fetcher_impl.cc",
"proximity_auth/cryptauth/cryptauth_access_token_fetcher_impl.h",
"proximity_auth/cryptauth/cryptauth_api_call_flow.cc",
"proximity_auth/cryptauth/cryptauth_api_call_flow.h",
"proximity_auth/cryptauth/cryptauth_client.h",
"proximity_auth/cryptauth/cryptauth_client_impl.cc",
"proximity_auth/cryptauth/cryptauth_client_impl.h",
"proximity_auth/cryptauth/cryptauth_device_manager.cc",
"proximity_auth/cryptauth/cryptauth_device_manager.h",
"proximity_auth/cryptauth/cryptauth_enroller.h",
"proximity_auth/cryptauth/cryptauth_enroller_impl.cc",
"proximity_auth/cryptauth/cryptauth_enroller_impl.h",
"proximity_auth/cryptauth/cryptauth_enrollment_manager.cc",
"proximity_auth/cryptauth/cryptauth_enrollment_manager.h",
"proximity_auth/cryptauth/cryptauth_enrollment_utils.cc",
"proximity_auth/cryptauth/cryptauth_gcm_manager.cc",
"proximity_auth/cryptauth/cryptauth_gcm_manager.h",
"proximity_auth/cryptauth/cryptauth_gcm_manager_impl.cc",
"proximity_auth/cryptauth/cryptauth_gcm_manager_impl.h",
"proximity_auth/cryptauth/pref_names.cc",
"proximity_auth/cryptauth/pref_names.h",
"proximity_auth/cryptauth/secure_message_delegate.cc",
"proximity_auth/cryptauth/secure_message_delegate.h",
"proximity_auth/cryptauth/sync_scheduler.cc",
"proximity_auth/cryptauth/sync_scheduler.h",
"proximity_auth/cryptauth/sync_scheduler_impl.cc",
"proximity_auth/cryptauth/sync_scheduler_impl.h",
],
'export_dependent_settings': [
'cryptauth_proto',
],
},
{
'target_name': 'cryptauth_test_support',
'type': 'static_library',
'include_dirs': [
'..',
],
'dependencies': [
'cryptauth_proto',
'../base/base.gyp:base',
'../testing/gmock.gyp:gmock',
],
'sources': [
"proximity_auth/cryptauth/fake_cryptauth_gcm_manager.cc",
"proximity_auth/cryptauth/fake_cryptauth_gcm_manager.h",
"proximity_auth/cryptauth/fake_secure_message_delegate.cc",
"proximity_auth/cryptauth/fake_secure_message_delegate.h",
"proximity_auth/cryptauth/mock_cryptauth_client.cc",
"proximity_auth/cryptauth/mock_cryptauth_client.h",
"proximity_auth/cryptauth/mock_sync_scheduler.cc",
"proximity_auth/cryptauth/mock_sync_scheduler.h",
],
'export_dependent_settings': [
'cryptauth_proto',
],
},
{
# GN version: //components/proximity_auth/webui
'target_name': 'proximity_auth_webui',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../content/content.gyp:content_browser',
'../ui/resources/ui_resources.gyp:ui_resources',
'components_resources.gyp:components_resources',
'cryptauth',
'cryptauth_proto',
'proximity_auth',
],
'include_dirs': [
'..',
],
'sources': [
'proximity_auth/webui/cryptauth_enroller_factory_impl.cc',
'proximity_auth/webui/cryptauth_enroller_factory_impl.h',
'proximity_auth/webui/proximity_auth_ui.cc',
'proximity_auth/webui/proximity_auth_ui.h',
'proximity_auth/webui/proximity_auth_ui_delegate.h',
'proximity_auth/webui/proximity_auth_webui_handler.cc',
'proximity_auth/webui/proximity_auth_webui_handler.h',
'proximity_auth/webui/reachable_phone_flow.cc',
'proximity_auth/webui/reachable_phone_flow.h',
'proximity_auth/webui/url_constants.cc',
'proximity_auth/webui/url_constants.h',
],
},
],
}
| [
"[email protected]"
] | |
0468f8f8d3852fec3500d09cba904bdbd0e2e2c9 | bc9abf538f5d4093324b2f055f0b090fe4b03247 | /python/etc/primer3d.py | e4712fca4b2656dfe05717a7fff112e19d3cfd1f | [] | no_license | galaxysd/GalaxyCodeBases | 3c8900d0c2ca0ed73e9cf4c30630aca4da6cc971 | 58946261bf72afd6a7287e781a2176cdfaddf50e | refs/heads/master | 2023-09-04T04:59:35.348199 | 2023-08-25T01:04:02 | 2023-08-25T01:04:02 | 33,782,566 | 7 | 6 | null | 2020-09-04T06:06:49 | 2015-04-11T16:12:14 | C | UTF-8 | Python | false | false | 4,773 | py | #!/usr/bin/env python3
import argparse
import logging
import primer3 # https://github.com/libnano/primer3-py
# https://brcaexchange.org/variants and click "Show All Public Data", then click "Download" to get `variants.tsv`.
# gzcat ~/Downloads/variants.tsv.gz|head -n30|awk -F'\t' '{if (length($124)+length($125)==2 || NR==1) print $43,$108,$122,$123,$124,$125}'|column -t
# DBID_LOVD Genomic_Coordinate_hg38 Chr Pos Ref Alt
# BRCA1_001574 chr17:g.43093299:A>G 17 43093299 A G
# BRCA1_003516 chr17:g.43078507:T>G 17 43078507 T G
# BRCA1_004379 chr17:g.43103085:A>G 17 43103085 A G
from os.path import expanduser
InFile: str = r'~/tmp/variants.tsv.0.gz'
InFile = expanduser(InFile)
InRef: str = expanduser(r'~/tmp/GRCh38.fa')
from pyfaidx import Fasta
RefSeqs = Fasta(InRef)
#print(RefSeqs['chr1'])
# Random access of BGZip is not supported now, see https://github.com/mdshw5/pyfaidx/issues/126
#InColNames = ['DBID_LOVD','Chr','Pos','Ref','Alt','Genomic_Coordinate_hg38']
InColNames = ['Chr','Pos','Ref','Alt']
#import numpy
#import pandas as pd
#pd.read_table(InFile,compression='gzip',sep='\t')
import gzip
import csv
Total: int = 0
Skipped: int = 0
from typing import Dict, List, Tuple
InData: Dict[str,Dict[int,Tuple[str,List[str]]]] = {}
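# InData maps chromosome -> position -> (reference base, list of alternate bases).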
'''
Primer mode 1: place the upstream primer with its 3' end at the target site,
and the downstream primer 300bp-400bp away.
Primer mode 2: place the upstream primer 100bp upstream of the target site,
without covering the target, and the downstream primer 200-300bp away.
Only the region between the primer pair is considered; the primers themselves
are not.
Tm reference range: 55-62
'''
thePara: Dict[str,int] = dict(MaxAmpLen=400, MinAmpLen=300, P5Up1=0, P5Up2=100,
TmMax=63, TmMin=55, TmDeltra=5,
PrimerLenMin=25, PrimerLenMax=36, Mode2LeftMax=100
)
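# P5Up1 / P5Up2 appear to encode the two upstream offsets from the spec above
# (0 bp and 100 bp); Mode2LeftMax sets how much template is kept upstream of
# the target when the window is extracted below.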
with gzip.open(InFile, 'rt') as tsvin:
tsvin = csv.DictReader(tsvin, delimiter='\t')
#headers = tsvin.fieldnames
#print(headers)
for row in tsvin:
#print(', '.join(row[col] for col in InColNames))
Total += 1
if len(row['Ref']) > 1 or len(row['Alt']) > 1 :
#print(', '.join(row[col] for col in ['Chr','Pos','Ref','Alt']))
Skipped += 1
else :
print(', '.join(row[col] for col in InColNames))
row['Pos'] = int(row['Pos'])
if row['Chr'] in InData :
if row['Pos'] in InData[row['Chr']] :
InData[row['Chr']][row['Pos']][1].append(row['Alt'])
#print(InData[row['Chr']][row['Pos']])
else :
InData[row['Chr']][row['Pos']] = (row['Ref'],[row['Alt']])
else :
InData[row['Chr']] = { row['Pos'] : (row['Ref'],[row['Alt']]) }
Primer3GlobalArgs: Dict = {
'PRIMER_OPT_SIZE': 2+thePara['PrimerLenMin'],
'PRIMER_INTERNAL_MAX_SELF_END': 8,
'PRIMER_MIN_SIZE': thePara['PrimerLenMin'],
'PRIMER_MAX_SIZE': thePara['PrimerLenMax'],
'PRIMER_OPT_TM': 60.0,
'PRIMER_MIN_TM': thePara['TmMin'],
'PRIMER_MAX_TM': thePara['TmMax'],
'PRIMER_MIN_GC': 20.0,
'PRIMER_MAX_GC': 80.0,
'PRIMER_MAX_POLY_X': 10,
'PRIMER_INTERNAL_MAX_POLY_X': 10,
'PRIMER_SALT_MONOVALENT': 50.0,
'PRIMER_DNA_CONC': 50.0,
'PRIMER_MAX_NS_ACCEPTED': 0,
'PRIMER_MAX_SELF_ANY': 12,
'PRIMER_MAX_SELF_END': 8,
'PRIMER_PAIR_MAX_COMPL_ANY': 12,
'PRIMER_PAIR_MAX_COMPL_END': 8,
'PRIMER_PRODUCT_SIZE_RANGE': [[thePara['MinAmpLen']-thePara['PrimerLenMax'],thePara['MaxAmpLen']+thePara['PrimerLenMax']]],
'PRIMER_TASK': 'generic',
'PRIMER_PICK_LEFT_PRIMER': 1,
'PRIMER_PICK_INTERNAL_OLIGO': 0,
'PRIMER_PICK_RIGHT_PRIMER': 1,
'PRIMER_PAIR_MAX_DIFF_TM': thePara['TmDeltra'],
}
primer3.bindings.setP3Globals(Primer3GlobalArgs)
for ChrID in InData.keys() :
for thePos in InData[ChrID].keys() :
FulChrID: str = ''.join(['chr',ChrID])
# Start attributes are 1-based
Left: int = thePos - thePara['Mode2LeftMax'] - thePara['PrimerLenMax'] -1
if Left < 0 : Left = 0
#Left = thePos-1
# End attributes are 0-based
Right: int = thePos + thePara['MaxAmpLen'] + thePara['PrimerLenMax']
if Right > len(RefSeqs[FulChrID]) : Right = len(RefSeqs[FulChrID])
theSeq: str = RefSeqs[FulChrID][Left:Right]
print(':'.join([ChrID,str(thePos),FulChrID,str(theSeq),str(InData[ChrID][thePos]) ]))
Primer3Ret: Dict = primer3.bindings.designPrimers({
'SEQUENCE_ID': theSeq.fancy_name,
'SEQUENCE_TEMPLATE': str(theSeq),
'SEQUENCE_INCLUDED_REGION': [ thePara['PrimerLenMax'],thePara['MaxAmpLen'] ],
})
print(Primer3Ret)
print('[!] %(skipped)d InDels skipped in %(Total)d items.' % {'skipped': Skipped, 'Total': Total})
| [
"[email protected]"
] | |
217989fa3591eada71f3986a3f8d7079071acfa7 | d52ee2f7ec5dcd8825f4e221a7f084d488d35634 | /new_scripts/baselines/dmass/model/seq2seq.py | 8e1de823151e02d643e898a3ce67afbef2823090 | [] | no_license | rekriz11/sockeye-recipes | 9dbf96140e4d9d546210dd1c29801132e1b9201c | 644363b92e2f38311cc2b7e926b6558aa41900f3 | refs/heads/master | 2020-03-29T16:52:52.542574 | 2020-03-13T18:18:25 | 2020-03-13T18:18:25 | 150,131,769 | 5 | 3 | null | 2018-09-24T16:15:47 | 2018-09-24T16:15:46 | null | UTF-8 | Python | false | false | 16,250 | py | from model.graph import Graph
from model.graph import ModelOutput
from model.seq2seq_beamsearch import beam_search
import tensorflow as tf
from util import constant, nn
class Seq2SeqGraph(Graph):
def __init__(self, data, is_train, model_config):
super(Seq2SeqGraph, self).__init__(data, is_train, model_config)
self.model_fn = self.seq2seq_fn
self.rand_unif_init = tf.contrib.layers.xavier_initializer() # tf.random_uniform_initializer(-0.05, 0.05, seed=123)
self.trunc_norm_init = tf.contrib.layers.xavier_initializer() # tf.truncated_normal_initializer(stddev=1e-4)
def decode_inputs_to_outputs(self, inp, prev_state_c, prev_state_h, encoder_outputs, encoder_padding_bias,
encoder_features, attn_v,
rule_id_input_placeholder, mem_contexts, mem_outputs, global_step):
def attention(query):
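            # Three scoring variants: 'ffn' is additive (Bahdanau-style) attention,
            # 'dot' uses a plain dot product, and 'bilinear' adds a learned matrix.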
if self.model_config.attention_type == 'ffn':
decoder_feature = tf.expand_dims(
nn.linear(query, self.model_config.dimension, True, scope='decoder_feature'),
axis=1)
energy = tf.expand_dims(tf.reduce_sum(
attn_v * tf.tanh(encoder_features + decoder_feature), axis=2), axis=1)
energy += encoder_padding_bias
energy_norm = tf.nn.softmax(energy, axis=2)
context_vector = tf.matmul(energy_norm, encoder_outputs)
return tf.squeeze(context_vector, axis=1), tf.squeeze(energy_norm, axis=1)
elif self.model_config.attention_type == 'dot':
query = tf.expand_dims(tf.concat(query, axis=1), axis=1)
weight = tf.matmul(query, encoder_outputs, transpose_b=True)
weight += encoder_padding_bias
weight = tf.nn.softmax(weight, axis=2)
context_vector = tf.matmul(weight, encoder_outputs)
return tf.squeeze(context_vector, axis=1), tf.squeeze(weight, axis=1)
elif self.model_config.attention_type == 'bilinear':
query = tf.expand_dims(tf.concat(query, axis=1), axis=1)
weight = tf.matmul(query, encoder_features, transpose_b=True)
weight += encoder_padding_bias
weight = tf.nn.softmax(weight, axis=2)
context_vector = tf.matmul(weight, encoder_outputs)
return tf.squeeze(context_vector, axis=1), tf.squeeze(weight, axis=1)
prev_state = tf.contrib.rnn.LSTMStateTuple(prev_state_c, prev_state_h)
if self.is_train:
inp = tf.nn.dropout(inp,
1.0 - self.model_config.layer_prepostprocess_dropout)
cell_output, state = self.decode_cell(inp, prev_state)
context_vector, attn_dist = attention(state)
final_output = nn.linear([context_vector] + [cell_output], self.model_config.dimension, True,
scope='projection')
cur_context = None
if 'rule' in self.model_config.memory:
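            # Rule-memory path: attend over the stored rule contexts, retrieve the
            # matching memory outputs, and blend them in only after a warm-up phase.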
cur_context = context_vector
cur_mem_contexts = tf.stack(self.embedding_fn(rule_id_input_placeholder, mem_contexts), axis=1)
cur_mem_outputs = tf.stack(self.embedding_fn(rule_id_input_placeholder, mem_outputs), axis=1)
bias = tf.expand_dims(
-1e9 * tf.to_float(tf.equal(tf.stack(rule_id_input_placeholder, axis=1), 0)),
axis=1)
weights = tf.nn.softmax(bias + tf.matmul(tf.expand_dims(context_vector, axis=1), cur_mem_contexts, transpose_b=True))
mem_output = tf.squeeze(tf.matmul(weights, cur_mem_outputs), axis=1)
nmem_output = nn.linear([final_output] + [mem_output], self.model_config.dimension, True)
g = tf.greater(global_step, tf.constant(2 * self.model_config.memory_prepare_step, dtype=tf.int64))
final_output = tf.cond(g, lambda: nmem_output, lambda: final_output)
return final_output, state[0], state[1], attn_dist, cur_context
def seq2seq_fn(self, sentence_complex_input_placeholder, emb_complex,
sentence_simple_input_placeholder, emb_simple,
w, b, rule_id_input_placeholder, mem_contexts, mem_outputs, global_step):
train_mode = self.model_config.train_mode
with tf.variable_scope('seq2seq_encoder'):
encoder_embed_inputs = tf.stack(
self.embedding_fn(sentence_complex_input_placeholder, emb_complex), axis=1)
encoder_len = tf.cast(tf.reduce_sum(tf.to_float(tf.not_equal(tf.stack(sentence_complex_input_placeholder, axis=1),
self.data.vocab_complex.encode(constant.SYMBOL_PAD))), axis=1), tf.int32)
cell_fw = tf.contrib.rnn.LSTMCell(self.model_config.dimension, initializer=self.rand_unif_init,
state_is_tuple=True)
cell_bw = tf.contrib.rnn.LSTMCell(self.model_config.dimension, initializer=self.rand_unif_init,
state_is_tuple=True)
(encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_embed_inputs,
dtype=tf.float32,
sequence_length=encoder_len,
swap_memory=False)
encoder_outputs = tf.concat(axis=2, values=encoder_outputs)
encoder_padding_bias = tf.expand_dims(tf.to_float(tf.equal(tf.stack(sentence_complex_input_placeholder, axis=1),
self.data.vocab_complex.encode(constant.SYMBOL_PAD))), axis=1) * -1e9
# variables prepare for decoder
encoder_features, attn_v = None, None
if self.model_config.attention_type == 'bilinear':
attn_w = tf.get_variable("attn_w",
[1, 2 * self.model_config.dimension, 2 * self.model_config.dimension])
encoder_features = tf.nn.conv1d(encoder_outputs, attn_w, 1, 'SAME')
elif self.model_config.attention_type == 'ffn':
attn_w = tf.get_variable("attn_w",
[1, 2 * self.model_config.dimension, self.model_config.dimension])
encoder_features = tf.nn.conv1d(encoder_outputs, attn_w, 1, 'SAME')
attn_v = tf.get_variable("v", [1, 1, self.model_config.dimension])
w_reduce_c = tf.get_variable('w_reduce_c',
[self.model_config.dimension * 2, self.model_config.dimension],
dtype=tf.float32,
initializer=self.trunc_norm_init)
w_reduce_h = tf.get_variable('w_reduce_h',
[self.model_config.dimension * 2, self.model_config.dimension],
dtype=tf.float32,
initializer=self.trunc_norm_init)
bias_reduce_c = tf.get_variable('bias_reduce_c', [self.model_config.dimension], dtype=tf.float32,
initializer=self.trunc_norm_init)
bias_reduce_h = tf.get_variable('bias_reduce_h', [self.model_config.dimension], dtype=tf.float32,
initializer=self.trunc_norm_init)
# Apply linear layer
old_c = tf.concat(axis=1, values=[fw_st.c, bw_st.c]) # Concatenation of fw and bw cell
old_h = tf.concat(axis=1, values=[fw_st.h, bw_st.h]) # Concatenation of fw and bw state
new_c = tf.nn.relu(tf.matmul(old_c, w_reduce_c) + bias_reduce_c) # Get new cell from old cell
new_h = tf.nn.relu(tf.matmul(old_h, w_reduce_h) + bias_reduce_h) # Get new state from old state
decoder_in_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
with tf.variable_scope('seq2seq_decoder'):
self.decode_cell = tf.contrib.rnn.LSTMCell(
self.model_config.dimension, state_is_tuple=True, initializer=self.rand_unif_init)
state = decoder_in_state
if self.is_train:
batch_go = tf.zeros(
[self.model_config.batch_size, self.model_config.dimension])
decoder_embed_inputs_list = self.embedding_fn(
sentence_simple_input_placeholder[:-1], emb_simple)
decoder_embed_inputs_list = [batch_go] + decoder_embed_inputs_list
outputs = []
logits = []
attn_dists = []
                targets = []
contexts = []
sampled_targets = []
sampled_logits = []
for i, dec_inp in enumerate(decoder_embed_inputs_list):
if i > 0:
tf.get_variable_scope().reuse_variables()
x = dec_inp
state_c, state_h = state[0], state[1]
final_output, state_c, state_h, attn_dist, context = self.decode_inputs_to_outputs(
x, state_c, state_h, encoder_outputs, encoder_padding_bias,
encoder_features, attn_v,
rule_id_input_placeholder, mem_contexts, mem_outputs, global_step)
state = tf.contrib.rnn.LSTMStateTuple(state_c, state_h)
logit = self.output_to_logit(final_output, w, b)
target = tf.argmax(logit, axis=1)
if train_mode == 'dynamic_self-critical':
target = tf.stop_gradient(target)
sampled_target = tf.cast(tf.squeeze(
tf.multinomial(logit, 1), axis=1), tf.int32)
indices = tf.stack(
[tf.range(0, self.model_config.batch_size, dtype=tf.int32),
tf.squeeze(sampled_target)],
axis=-1)
sampled_logit = tf.gather_nd(tf.nn.softmax(logit, axis=1), indices)
sampled_targets.append(sampled_target)
sampled_logits.append(sampled_logit)
                    targets.append(target)
logits.append(logit)
outputs.append(final_output)
attn_dists.append(attn_dist)
contexts.append(context)
if 'rule' in self.model_config.memory:
contexts = tf.stack(contexts, axis=1)
gt_target_list = sentence_simple_input_placeholder
output = ModelOutput(
contexts=contexts if 'rule' in self.model_config.memory else None,
encoder_outputs=encoder_outputs,
decoder_outputs_list=outputs,
final_outputs_list=outputs,
decoder_logit_list=logits,
gt_target_list=gt_target_list,
encoder_embed_inputs_list=tf.unstack(encoder_embed_inputs, axis=1),
                    decoder_target_list=targets,
sample_logit_list=sampled_logits if train_mode == 'dynamic_self-critical' else None,
sample_target_list=sampled_targets if train_mode == 'dynamic_self-critical' else None,
decoder_score=0.0,
attn_distr_list=attn_dists,
)
self.attn_dists = tf.stack(attn_dists, axis=1)
                self.targets = tf.stack(targets, axis=1)
self.cs = tf.stack(sentence_complex_input_placeholder, axis=1)
return output
else:
# encoder_beam_outputs = tf.concat(
# [tf.tile(tf.expand_dims(encoder_outputs[o, :, :], axis=0),
# [self.model_config.beam_search_size, 1, 1])
# for o in range(self.model_config.batch_size)], axis=0)
#
# encoder_beam_features = tf.concat(
# [tf.tile(tf.expand_dims(encoder_outputs[o, :, :], axis=0),
# [self.model_config.beam_search_size, 1, 1])
# for o in range(self.model_config.batch_size)], axis=0)
#
# encoder_beam_padding_bias = tf.concat(
# [tf.tile(tf.expand_dims(encoder_padding_bias[o, :], axis=0),
# [self.model_config.beam_search_size, 1])
# for o in range(self.model_config.batch_size)], axis=0)
def symbol_to_logits_fn(ids, pre_state_c, pre_state_h):
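                # Beam-search step: embed the last emitted token, run one decoder
                # step, and return the vocabulary logits plus the new LSTM state.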
id = ids[:, -1]
inp = self.embedding_fn(id, emb_simple)
final_output, state_c, state_h, attn_dist, _ = self.decode_inputs_to_outputs(
inp, pre_state_c, pre_state_h, encoder_outputs, encoder_padding_bias, encoder_features, attn_v,
rule_id_input_placeholder, mem_contexts, mem_outputs, global_step)
logit = self.output_to_logit(final_output, w, b)
return logit, state_c, state_h, attn_dist
beam_ids, beam_score, beam_attn_distrs = beam_search(symbol_to_logits_fn,
tf.zeros([self.model_config.batch_size], tf.int32),
self.model_config.beam_search_size,
self.model_config.max_simple_sentence,
self.data.vocab_simple.vocab_size(),
self.model_config.penalty_alpha,
state[0], state[1],
model_config=self.model_config)
top_beam_ids = beam_ids[:, 0, 1:]
top_beam_ids = tf.pad(top_beam_ids,
[[0, 0],
[0, self.model_config.max_simple_sentence - tf.shape(top_beam_ids)[1]]])
decoder_target_list = [tf.squeeze(d, 1)
for d in tf.split(top_beam_ids, self.model_config.max_simple_sentence, axis=1)]
decoder_score = -beam_score[:, 0] / tf.to_float(tf.shape(top_beam_ids)[1])
top_attn_distrs = beam_attn_distrs[:, 0, 1:]
top_attn_distrs = tf.pad(top_attn_distrs,
[[0, 0],
[0, self.model_config.max_simple_sentence - tf.shape(top_attn_distrs)[1]], [0,0]])
top_attn_distrs.set_shape(
[self.model_config.batch_size, self.model_config.max_simple_sentence, self.model_config.max_complex_sentence])
gt_target_list = sentence_simple_input_placeholder
output = ModelOutput(
# contexts=cur_context if 'rule' in self.model_config.memory else None,
encoder_outputs=encoder_outputs,
# decoder_outputs_list=outputs if train_mode != 'dynamic_self-critical' else None,
# final_outputs_list=outputs if train_mode != 'dynamic_self-critical' else None,
# decoder_logit_list=logits if train_mode != 'dynamic_self-critical' else None,
gt_target_list=gt_target_list,
# encoder_embed_inputs_list=tf.unstack(encoder_embed_inputs, axis=1),
decoder_target_list=decoder_target_list,
# sample_logit_list=sampled_logit_list if train_mode == 'dynamic_self-critical' else None,
# sample_target_list=sampled_target_list if train_mode == 'dynamic_self-critical' else None,
decoder_score=decoder_score,
attn_distr_list=top_attn_distrs
)
return output
| [
"[email protected]"
] | |
d2fe44622bb24756b61b213bf3e55799154afa69 | ca752ad55da471392e8690437d9a672c9a52bf2a | /manage.py | 30ad0b0215c06bac6732623c2d633d4d66ec30aa | [] | no_license | fortable1999/zhaomengblog | 9280f8bbb1b8f8bbb8e56e26b0b7fb074e07685b | f9ee379affee99ebf8a4a6da2b322fb469451fe9 | refs/heads/master | 2021-01-10T21:30:37.297876 | 2013-07-12T13:05:47 | 2013-07-12T13:05:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zmblog.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
e7b36c21f59ad06459d4b86ada0988020ae3ef90 | 463c8ba5baad086d37819804af4ee10f43ab6dd5 | /Algorithm/190911/실수_연습문제3.py | 82ce02e942a893598cacf8b5c5949694673348f5 | [] | no_license | sooya14/TIL | dbbb0608d45ce273ddef6f7cea1b1195285f269d | 232b0d38d8f6ee2e6e5517bfd6a2a15cf1000dad | refs/heads/master | 2023-01-11T17:12:39.370178 | 2020-05-11T12:06:41 | 2020-05-11T12:06:41 | 195,916,241 | 0 | 0 | null | 2023-01-05T18:22:56 | 2019-07-09T02:17:42 | Jupyter Notebook | UTF-8 | Python | false | false | 169 | py |
def Bbit_print(i):
output = ''
for j in range(15, -1, -1):
output += '1' if i & (1 << j) else '0'
return output
a = 0xDEC
print(Bbit_print(a))
| [
"[email protected]"
] | |
ba50d67df374167270831ee86f66ac7d0f40ba3f | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/F/frabcus/p-francis.py | 16380646750da2ccabd56a5aaf6b52e69bb3b125 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | import scraperwiki
print "hello, world"
download = scraperwiki.scrape("http://un.org/")
print download
data = { 'foo': 10, 'bar': 'hello' }
scraperwiki.sqlite.save( ['foo'], data )
| [
"[email protected]"
] | |
511a12dcf6ad86e4c5a9d820d091e7c541027811 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03239/s021170085.py | 463b5e90f404a028b4f64d43dc4fefef0042eb47 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | def resolve():
n,T=map(int,input().split())
ans=10000
for i in range(n):
c,t=map(int,input().split())
if t<=T:
ans=min(ans,c)
if ans==10000:
print('TLE')
else:
print(ans)
if __name__ == '__main__':
resolve() | [
"[email protected]"
] | |
db0075a312c5191fbe8ab1c749b93da07077d880 | 2eeda6bfea74cf746f8223274ee9ec25b9387526 | /dgCubeDemo/testString.py | 8641d86826c9af2d47311dc85fb3b364d42349e4 | [] | no_license | PowerDG/PycharmProjects | 74f6468964d64846d8c979260a51f375e5d0476d | 74a7f18be4a7337eef546e4bf3cc6320b9f5b39d | refs/heads/master | 2022-12-09T09:25:11.993089 | 2020-09-18T13:28:00 | 2020-09-18T13:28:00 | 287,977,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# author:xinlan time:
# I am Chinese
"""
https://zhuanlan.zhihu.com/p/52770875
I am Chinese,
and you?
"""
str1 = 'My name is %s, and my dad is %s' % ('小王', '老王')
print(str1)
nameStr = '马爸爸'
moneyStr = ' is rich'
print('Joining strings with +:', nameStr + moneyStr)
# === Lists ---- [ ]
nameList = ['猴子', '马云', '王健林', '马化腾']
nameLen = len(nameList)
print(nameLen)
nameList.append('刘强东')
print('List after appending one element:', nameList)
del nameList[1]
print('List after deleting the 2nd element:', nameList)
name1 = nameList[0]
print('The first element of the list is:', name1)
print('List before modification:', nameList)
nameList[0] = '孙悟空'
print('List after modification:', nameList)
# === Defining a tuple
gafataTuple = ('腾讯', '阿里巴巴', '苹果', '谷歌', 'FB', '亚马逊')
# Length of the tuple
gafataLen = len(gafataTuple)
print('The length of the tuple is:', gafataLen)
# Looking up a tuple element
print('Value of the 1st element:', gafataTuple[0])
# === 7. Sets ---- curly braces { }  Definition: a set is a container without duplicate elements
#
# Lists are ordered and allow duplicates; sets are unordered and disallow duplicates
gafataSets = {'腾讯', '阿里巴巴', '苹果', '谷歌', 'FB', '亚马逊', '亚马逊'}
print(gafataSets)
stockSets = set()
# (1) Add elements with update()
stockSets.update(['腾讯', '阿里巴巴', '京东'])
print(stockSets)
# (2) Delete
stockSets.discard('京东')
print(stockSets)
# (3) Look up
txBool = '京东' in stockSets
print(txBool)
# (4) Modify
stockSets.discard('京东')
stockSets.update(['京东'])
print(stockSets)
# 8. Dictionaries (mappings of key-value pairs) ---- { }
# Defining a dictionary
patientDic = {'001': ['猴子', 29, 'type 1 diabetes', 'poor'], '002': ['马云', 34, 'type 2 diabetes', 'improving'], '003': ['王健林', 28, 'type 1 diabetes', 'markedly improved'],
              '004': ['马化腾', 52, 'diabetes', 'improving'], '005': ['王思聪', 30, 'type 1 diabetes', 'improving']}
# === Dictionary operations:
# (1) Add
print(patientDic)
# (2) Delete
del patientDic['005']
print(patientDic)
# (3) Look up
valueList = patientDic['001']
print(valueList)
# (4) Modify
print('Patient records before modification:', patientDic)
patientDic['001'] = ['猴子', 29, 'type 1 diabetes', 'improving']
print('Patient records after modification:', patientDic)
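# (5) Iterate over all key-value pairs with items()
for patientId, patientInfo in patientDic.items():
    print(patientId, patientInfo)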
| [
"[email protected]"
] | |
480312fb4dd33c1c96bed2d89c5eda9c402cec34 | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /third_party/blink/renderer/build/scripts/make_origin_trials.py | db5b93a998e28fcaa8ec63a52fc4e2229de671db | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 5,714 | py | #!/usr/bin/env python
# Copyright (C) 2015 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import json5_generator
import make_runtime_features
import name_utilities
import template_expander
# We want exactly the same parsing as RuntimeFeatureWriter
# but generate different files.
class OriginTrialsWriter(make_runtime_features.BaseRuntimeFeatureWriter):
file_basename = 'origin_trials'
def __init__(self, json5_file_path, output_dir):
super(OriginTrialsWriter, self).__init__(json5_file_path, output_dir)
self._outputs = {
(self.file_basename + '.cc'): self.generate_implementation,
}
self._implied_mappings = self._make_implied_mappings()
self._trial_to_features_map = self._make_trial_to_features_map()
self._max_features_per_trial = max(
len(features) for features in self._trial_to_features_map.values())
self._set_trial_types()
@property
def origin_trial_features(self):
return self._origin_trial_features
def _make_implied_mappings(self):
# Set up the implied_by relationships between trials.
implied_mappings = dict()
for implied_feature in (feature
for feature in self._origin_trial_features
if feature['origin_trial_feature_name']
and feature['implied_by']):
# An origin trial can only be implied by other features that also
# have a trial defined.
implied_by_trials = []
for implied_by_name in implied_feature['implied_by']:
if any(implied_by_name == feature['name'].original
and feature['origin_trial_feature_name']
for feature in self._origin_trial_features):
implied_by_trials.append(implied_by_name)
# Keep a list of origin trial features implied for each
# trial. This is essentially an inverse of the implied_by
# list attached to each feature.
implied_list = implied_mappings.get(implied_by_name)
if implied_list is None:
implied_list = set()
implied_mappings[implied_by_name] = implied_list
implied_list.add(implied_feature['name'].original)
implied_feature['implied_by_origin_trials'] = implied_by_trials
return implied_mappings
def _make_trial_to_features_map(self):
trial_feature_mappings = {}
for feature in [
feature for feature in self._origin_trial_features
if feature['origin_trial_feature_name']
]:
trial_name = feature['origin_trial_feature_name']
if trial_name in trial_feature_mappings:
trial_feature_mappings[trial_name].append(feature)
else:
trial_feature_mappings[trial_name] = [feature]
return trial_feature_mappings
def _set_trial_types(self):
for feature in self._origin_trial_features:
trial_type = feature['origin_trial_type']
if feature[
'origin_trial_allows_insecure'] and trial_type != 'deprecation':
raise Exception('Origin trial must have type deprecation to '
'specify origin_trial_allows_insecure: %s' %
feature['name'])
if trial_type:
feature[
'origin_trial_type'] = name_utilities._upper_camel_case(
trial_type)
@template_expander.use_jinja('templates/' + file_basename + '.cc.tmpl')
def generate_implementation(self):
return {
'features': self._features,
'origin_trial_features': self._origin_trial_features,
'implied_origin_trial_features': self._implied_mappings,
'trial_to_features_map': self._trial_to_features_map,
'max_features_per_trial': self._max_features_per_trial,
'input_files': self._input_files,
}
if __name__ == '__main__':
json5_generator.Maker(OriginTrialsWriter).main()
| [
"[email protected]"
] | |
c4c4ccbd2c0b793fb79453c25f124a66643c2f62 | 8535bbc7781c4691880c935bd7025646f0dbb7c3 | /sum of square of digits.py | 56cc2426f15835c4fed720b0b451c4f4bf9814a9 | [] | no_license | Mahadev0317/Codekata | 3b2149f3116ebe4b48b2059b873544c27b23ff39 | c35fa0ed0c4870faea69152638f461e743a9ff69 | refs/heads/master | 2020-04-15T04:59:17.062947 | 2019-05-29T04:46:35 | 2019-05-29T04:46:35 | 164,404,727 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | n=list(input())
s=0
for i in n:
i=int(i)
s=s+(i**2)
print(s)
| [
"[email protected]"
] | |
2358045598136f87583bfb602e34f757cb5c6c6d | cff08cd0aefb52f1cf2c5a8bc5c301c6a03b886e | /maskrcnn_benchmark/config/defaults_2d.py | 672946d117c798f5610a2b65bfd98d1a35a4af03 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zhupan007/Detection_3D | 5bc0c4f185889c08a531990a257af7f3af25fafa | 2fead7b8d754912a53fed6c5826d4d898a520237 | refs/heads/master | 2020-11-28T13:48:16.763314 | 2019-11-25T11:12:47 | 2019-11-25T11:12:47 | 212,738,109 | 1 | 0 | MIT | 2019-10-04T04:51:29 | 2019-10-04T04:51:29 | null | UTF-8 | Python | false | false | 11,394 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.MODEL = CN()
_C.MODEL.RPN_ONLY = False
_C.MODEL.MASK_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
# If the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""
# -----------------------------------------------------------------------------
# Sparse 3D
# -----------------------------------------------------------------------------
_C.SPARSE3D = CN()
_C.SPARSE3D.VOXEL_SCALE = 20
_C.SPARSE3D.VOXEL_FULL_SCALE = 4096
_C.SPARSE3D.VAL_REPS = 3
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = 800 # (800,)
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [1., 1., 1.]
# Convert image to BGR format (for Caffe2 models), in range 0-255
_C.INPUT.TO_BGR255 = True
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.TRAIN = ()
# List of the dataset names for testing, as present in paths_catalog.py
_C.DATASETS.TEST = ()
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
_C.DATALOADER.SIZE_DIVISIBILITY = 0
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# The backbone conv body to use
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN
# backbone)
_C.MODEL.BACKBONE.CONV_BODY = "R-50-C4"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2
_C.MODEL.BACKBONE.OUT_CHANNELS = 256 * 4
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.USE_FPN = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
_C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached.
# For FPN, number of strides should match number of scales
_C.MODEL.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (a the scale used during training or inference)
_C.MODEL.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Custom rpn head, empty to use default conv or separable conv
_C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead"
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.USE_FPN = False
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH * NUM_GPUS
# E.g., a common configuration is: 512 * 2 * 8 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100
_C.MODEL.ROI_BOX_HEAD = CN()
_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor"
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81
# Hidden layer dimension when using an MLP for the RoI box head
_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)
_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# Whether or not resize and translate masks to the input image.
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt}
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Residual transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 2500
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 16
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
_C.TEST.EXPECTED_RESULTS = []
_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST.IMS_PER_BATCH = 8
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "."
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
| [
"[email protected]"
] | |
7c4beb4c63680a1ee45c5c97a528c02e1ef70d08 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 12493 stars/main.py | 779906f17e251db0fb19cfabdf8ad7b649c8263a | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import sys
import math
def primeFactors(n):
i = 2
result = []
while i <= math.sqrt(n):
while n % i == 0:
result.append(i)
n /= i
i += 1
if n != 1:
result.append(n)
result.sort()
return result
sys.stdin = open('input.txt')
while True:
try:
n = int(input())
except:
break
res = n
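    # Euler's product formula: phi(n) = n * prod over distinct primes p|n of
    # (1 - 1/p); the value printed below is phi(n) / 2.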
for f in set(primeFactors(n)):
res *= (1 - 1.0 / f)
print int(res / 2)
| [
"[email protected]"
] | |
c41013fcbae7df51d04a6e80c96ad4de24752f18 | d0aade2edd6ba5750d70c70198a4bfe16356355e | /maskrcnn_benchmark/modeling/poolers.py | 754b5147485769c9de1ff6429e4a1300a0e27231 | [
"MIT"
] | permissive | mjq11302010044/RRPN_pytorch | ca3a6b781d49b80323671581ea0a5c13ca500a7a | a966f6f238c03498514742cde5cd98e51efb440c | refs/heads/master | 2022-08-29T07:29:20.311262 | 2020-10-16T02:29:19 | 2020-10-16T02:29:19 | 184,703,273 | 305 | 68 | MIT | 2020-04-30T06:37:19 | 2019-05-03T05:28:03 | Python | UTF-8 | Python | false | false | 7,295 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import ROIAlign
from maskrcnn_benchmark.layers import RROIAlign
from .utils import cat
class LevelMapper(object):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
"""
Arguments:
k_min (int)
k_max (int)
canonical_scale (int)
canonical_level (int)
eps (float)
"""
self.k_min = k_min
self.k_max = k_max
self.s0 = canonical_scale
self.lvl0 = canonical_level
self.eps = eps
def __call__(self, boxlists):
"""
Arguments:
boxlists (list[BoxList])
"""
# Compute level ids
s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
# Eqn.(1) in FPN paper
target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
return target_lvls.to(torch.int64) - self.k_min
class PyramidRROIAlign(nn.Module):
"""
Pooler for Detection with or without FPN.
It currently hard-code ROIAlign in the implementation,
but that can be made more generic later on.
Also, the requirement of passing the scales is not strictly necessary, as they
can be inferred from the size of the feature map / size of original image,
which is available thanks to the BoxList.
"""
def __init__(self, output_size, scales):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(PyramidRROIAlign, self).__init__()
poolers = []
for scale in scales:
poolers.append(
RROIAlign(
output_size, spatial_scale=scale
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.map_levels = LevelMapper(lvl_min, lvl_max)
def convert_to_roi_format(self, boxes):
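        # Concatenate the boxes of every image in the batch and prepend each row
        # with its image index, giving the (batch_idx, coords) rows RROIAlign expects.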
concat_boxes = cat([b.bbox for b in boxes], dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = cat(
[
torch.full((len(b), 1), i, dtype=dtype, device=device)
for i, b in enumerate(boxes)
],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def forward(self, x, boxes):
"""
Arguments:
x (list[Tensor]): feature maps for each level
boxes (list[BoxList]): boxes to be used to perform the pooling operation.
Returns:
result (Tensor)
"""
num_levels = len(self.poolers)
rois = self.convert_to_roi_format(boxes)
if num_levels == 1:
return self.poolers[0](x[0], rois)
levels = self.map_levels(boxes)
num_rois = len(rois)
num_channels = x[0].shape[1]
output_size = self.output_size[0]
dtype, device = x[0].dtype, x[0].device
result = torch.zeros(
(num_rois, num_channels, output_size, output_size),
dtype=dtype,
device=device,
)
# result = []
for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
idx_in_level = torch.nonzero(levels == level).squeeze(1)
rois_per_level = rois[idx_in_level]
result[idx_in_level] = pooler(per_level_feature, rois_per_level) # rois_per_level)
# result.append(pooler(per_level_feature, rois))
return result # torch.cat(result, 1)
class Pooler(nn.Module):
"""
Pooler for Detection with or without FPN.
It currently hard-code ROIAlign in the implementation,
but that can be made more generic later on.
Also, the requirement of passing the scales is not strictly necessary, as they
can be inferred from the size of the feature map / size of original image,
which is available thanks to the BoxList.
"""
def __init__(self, output_size, scales, sampling_ratio):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(Pooler, self).__init__()
poolers = []
for scale in scales:
poolers.append(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.map_levels = LevelMapper(lvl_min, lvl_max)
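    # Example (assumed FPN scales): scales = [1/4, 1/8, 1/16, 1/32] yields
    # lvl_min = 2 and lvl_max = 5, matching feature levels P2..P5.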
def convert_to_roi_format(self, boxes):
concat_boxes = cat([b.bbox for b in boxes], dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = cat(
[
torch.full((len(b), 1), i, dtype=dtype, device=device)
for i, b in enumerate(boxes)
],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
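    # Sketch of the resulting layout (assuming BoxList boxes in xyxy mode):
    # each row of `rois` is [batch_index, x1, y1, x2, y2], e.g. two boxes from
    # image 0 and one from image 1 become
    #   [[0., 10., 10., 50., 50.],
    #    [0., 20., 30., 40., 60.],
    #    [1.,  5.,  5., 25., 25.]]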
def forward(self, x, boxes):
"""
Arguments:
x (list[Tensor]): feature maps for each level
boxes (list[BoxList]): boxes to be used to perform the pooling operation.
Returns:
result (Tensor)
"""
num_levels = len(self.poolers)
rois = self.convert_to_roi_format(boxes)
if num_levels == 1:
return self.poolers[0](x[0], rois)
levels = self.map_levels(boxes)
num_rois = len(rois)
num_channels = x[0].shape[1]
output_size = self.output_size[0]
dtype, device = x[0].dtype, x[0].device
result = torch.zeros(
(num_rois, num_channels, output_size, output_size),
dtype=dtype,
device=device,
)
for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
idx_in_level = torch.nonzero(levels == level).squeeze(1)
rois_per_level = rois[idx_in_level]
result[idx_in_level] = pooler(per_level_feature, rois_per_level)
return result
| [
"[email protected]"
] | |
ded84433bff0e82fd58f4dc304b9a645a18403dd | dfad28a2e1a0199c0117e551fd1e31804804d5b9 | /app/__init__.py | 6efcbe27d1084742b368ce958eb28dc8ecdb33de | [
"MIT"
] | permissive | wilbrone/Pitches | c33d60b142b43de9ccf60a86cf59acbc262c6711 | b20d234fd930a6551f26d9cf863c6d1631b62bc2 | refs/heads/master | 2022-12-09T08:02:08.631177 | 2019-11-25T23:47:13 | 2019-11-25T23:47:13 | 223,405,696 | 0 | 0 | MIT | 2022-12-08T06:55:48 | 2019-11-22T13:09:30 | Python | UTF-8 | Python | false | false | 1,139 | py | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_login import LoginManager
from flask_simplemde import SimpleMDE
# from flask_uploads import UploadSet,configure_uploads,IMAGES
from config import config_options
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
# photos = UploadSet('photos',IMAGES)
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
simple = SimpleMDE()
def create_app(config_name):
app = Flask(__name__)
# creating app configurations
app.config.from_object(config_options[config_name])
# initializing flask extentions
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
    # registering Blueprints
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint,url_prefix = '/authenticate')
# configure UploadSet
# configure_uploads(app,photos)
return app
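# Minimal usage sketch (assumed entry-point module, not part of this package):
#   from app import create_app
#   app = create_app('development')  # the key must exist in config_options
#   app.run()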
| [
"[email protected]"
] | |
fa772d6c102d931e816220e31d045e9b09bf18ab | 3fd9c7ee49a32eae3013191b63154a9a5d6dafe6 | /12.6驾驶飞船/12.6.4调整飞船的速度/alien_invasion_0.10.py | 07be0e8c4159b335189b2368a83e687b00039bf0 | [] | no_license | taozhenting/alien_invasion | e0c03cd9797cb33e40ca47a13eadeda8b1c4cf85 | fd9bd97d6238da702fbb1eb6fcb78e8352875fe2 | refs/heads/master | 2020-04-27T05:31:48.862784 | 2019-01-30T09:43:49 | 2019-01-30T09:43:50 | 174,083,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | #修改while循环,每次执行循环时调用飞船方法update()
import pygame
from settings_2 import Settings
from ship_4 import Ship
import game_functions_5 as gf
def run_game():
    # Initialize the game and create a screen object
pygame.init()
    # Create a Settings instance and store it in the ai_settings variable
ai_settings = Settings()
    # Use the screen_width and screen_height attributes of ai_settings
screen = pygame.display.set_mode(
(ai_settings.screen_width,ai_settings.screen_height)
)
pygame.display.set_caption("Alien Invasion")
    # Create a ship
    # The ai_settings argument must be passed in
ship = Ship(ai_settings,screen)
    # Start the main loop for the game
while True:
gf.check_events(ship)
        # Update the ship's position after keyboard events are detected (but before the screen is redrawn)
ship.update()
gf.update_screen(ai_settings,screen,ship)
run_game() | [
"[email protected]"
] | |
584201b3a981910411696aaa3cbbeb9fa1d2944e | 8f8f40280afdd0c47fd39664b43c8fb45d86a285 | /code_sd_ssc/plot_utils.py | d11d53930b5434a904ba8695ee4e51e7e4f4b739 | [] | no_license | shibaji7/IP_Shock_SSC_SuperDARN | d463341c432e14c3007a0540ad96b4325289d6c0 | 32ea9d72d2ab68a7b80ab12f41228783370b6d4f | refs/heads/main | 2023-02-19T14:32:43.806681 | 2021-01-23T02:41:00 | 2021-01-23T02:41:00 | 303,022,533 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,803 | py | #!/usr/bin/env python
"""plot_utils.py: module is dedicated to plot different types of parameters"""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "[email protected]"
__status__ = "Research"
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import utils
class RangeTimePlot(object):
"""
Create plots for velocity or power.
"""
def __init__(self, nrang, fig_title, num_subplots=1):
self.nrang = nrang
self.unique_gates = np.linspace(1, nrang, nrang)
self.num_subplots = num_subplots
self._num_subplots_created = 0
self.fig = plt.figure(figsize=(8, 3*num_subplots), dpi=100) # Size for website
plt.suptitle(fig_title, x=0.075, y=0.99, ha="left", fontweight="bold", fontsize=15)
mpl.rcParams.update({"font.size": 10})
return
def _tight_layout(self):
#self.fig.tight_layout(rect=[0, 0, 0.9, 0.97])
return
def show(self):
plt.show()
return
def save(self, filepath):
plt.savefig(filepath, bbox_inches="tight")
return
def close(self):
self.fig.clf()
plt.close()
return
def _add_axis(self):
self._num_subplots_created += 1
ax = self.fig.add_subplot(self.num_subplots, 1, self._num_subplots_created)
return ax
def _add_colorbar(self, fig, ax, bounds, colormap, label=""):
"""
Add a colorbar to the right of an axis.
:param fig:
:param ax:
:param bounds:
:param colormap:
:param label:
:return:
"""
import matplotlib as mpl
pos = ax.get_position()
cpos = [pos.x1 + 0.025, pos.y0 + 0.0125,
                0.015, pos.height * 0.9]  # this list defines (left, bottom, width, height)
cax = fig.add_axes(cpos)
norm = mpl.colors.BoundaryNorm(bounds, colormap.N)
cb2 = mpl.colorbar.ColorbarBase(cax, cmap=colormap,
norm=norm,
ticks=bounds,
spacing="uniform",
orientation="vertical")
cb2.set_label(label)
return
def addPlot(self, df, beam, param="v", title="", pmax=200, step=25, xlabel="Time UT"):
# add new axis
df = df[df.bmnum==beam]
self.ax = self._add_axis()
# set up variables for plotter
bounds = list(range(-pmax, pmax+1, step))
cmap = plt.cm.jet
X, Y, Z = utils.get_gridded_parameters(df, xparam="time", yparam="slist", zparam="v")
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# cmap.set_bad("w", alpha=0.0)
# Configure axes
self.ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
hours = mdates.HourLocator(byhour=range(0, 24, 4))
self.ax.xaxis.set_major_locator(hours)
self.ax.set_xlabel(xlabel)
self.ax.set_xlim([df.time.tolist()[0], df.time.tolist()[-1]])
self.ax.set_ylabel("Range gate")
self.ax.pcolormesh(X, Y, Z.T, lw=0.01, edgecolors="None", cmap=cmap, norm=norm)
self._tight_layout() # need to do this before adding the colorbar, because it depends on the axis position
self._add_colorbar(self.fig, self.ax, bounds, cmap, label="Velocity [m/s]")
self.ax.set_title(title, loc="left", fontdict={"fontweight": "bold"})
return
| [
"[email protected]"
] | |
f0b36e876db026e32aeb47a005d47490db58f2bd | ff23e5c890216a1a63278ecb40cd7ac79ab7a4cd | /clients/client/python/test/test_update_recovery_flow_body.py | 91fe274624feee373ef8d631fc0e1e6dd405c9ab | [
"Apache-2.0"
] | permissive | ory/sdk | fcc212166a92de9d27b2dc8ff587dcd6919e53a0 | 7184e13464948d68964f9b605834e56e402ec78a | refs/heads/master | 2023-09-01T10:04:39.547228 | 2023-08-31T08:46:23 | 2023-08-31T08:46:23 | 230,928,630 | 130 | 85 | Apache-2.0 | 2023-08-14T11:09:31 | 2019-12-30T14:21:17 | C# | UTF-8 | Python | false | false | 1,293 | py | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v1.1.51
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.update_recovery_flow_with_code_method import UpdateRecoveryFlowWithCodeMethod
from ory_client.model.update_recovery_flow_with_link_method import UpdateRecoveryFlowWithLinkMethod
globals()['UpdateRecoveryFlowWithCodeMethod'] = UpdateRecoveryFlowWithCodeMethod
globals()['UpdateRecoveryFlowWithLinkMethod'] = UpdateRecoveryFlowWithLinkMethod
from ory_client.model.update_recovery_flow_body import UpdateRecoveryFlowBody
class TestUpdateRecoveryFlowBody(unittest.TestCase):
"""UpdateRecoveryFlowBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateRecoveryFlowBody(self):
"""Test UpdateRecoveryFlowBody"""
# FIXME: construct object with mandatory attributes with example values
# model = UpdateRecoveryFlowBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
faeb98b87b529091b354db981aaab9a7664aa97d | a59d20b9918df7cc17cfaf4bd1abaa73a086eb2a | /tfcnn_retraining.py | bc37e62d3d31befff38348e8f53c5da0d95d24cf | [] | no_license | yanqinghao/TFdeeplearning | 5da5e7f4462c539ae25f29a42f2c5bc3b4abd171 | 8ec4f99b6e73b9d0866bc3706e2807cfa3229c58 | refs/heads/master | 2018-12-10T13:45:35.552852 | 2018-09-13T09:52:49 | 2018-09-13T09:52:49 | 118,085,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | import os
import tarfile
import _pickle as cPickle
import numpy as np
import urllib.request
import scipy.misc
cifar_link = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
data_dir = 'temp'
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
objects = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
target_file = os.path.join(data_dir, 'cifar-10-python.tar.gz')
if not os.path.isfile(target_file):
print('CIFAR-10 file not found. Downloading CIFAR data (Size = 163MB)')
print('This may take a few minutes, please wait.')
filename, headers = urllib.request.urlretrieve(cifar_link, target_file)
# Extract into memory
tar = tarfile.open(target_file)
tar.extractall(path=data_dir)
tar.close()
# Create train image folders
train_folder = 'train_dir'
if not os.path.isdir(os.path.join(data_dir, train_folder)):
for i in range(10):
folder = os.path.join(data_dir, train_folder, objects[i])
os.makedirs(folder)
# Create test image folders
test_folder = 'validation_dir'
if not os.path.isdir(os.path.join(data_dir, test_folder)):
for i in range(10):
folder = os.path.join(data_dir, test_folder, objects[i])
os.makedirs(folder)
def load_batch_from_file(file):
file_conn = open(file, 'rb')
image_dictionary = cPickle.load(file_conn, encoding='latin1')
file_conn.close()
return(image_dictionary)
def save_images_from_dict(image_dict, folder='data_dir'):
for ix, label in enumerate(image_dict['labels']):
folder_path = os.path.join(data_dir, folder, objects[label])
filename = image_dict['filenames'][ix]
#Transform image data
image_array = image_dict['data'][ix]
image_array.resize([3, 32, 32])
# Save image
output_location = os.path.join(folder_path, filename)
scipy.misc.imsave(output_location,image_array.transpose())
data_location = os.path.join(data_dir, 'cifar-10-batches-py')
train_names = ['data_batch_' + str(x) for x in range(1,6)]
test_names = ['test_batch']
# Sort train images
for file in train_names:
print('Saving images from file: {}'.format(file))
file_location = os.path.join(data_dir, 'cifar-10-batches-py', file)
image_dict = load_batch_from_file(file_location)
save_images_from_dict(image_dict, folder=train_folder)
# Sort test images
for file in test_names:
print('Saving images from file: {}'.format(file))
file_location = os.path.join(data_dir, 'cifar-10-batches-py', file)
image_dict = load_batch_from_file(file_location)
save_images_from_dict(image_dict, folder=test_folder)
cifar_labels_file = os.path.join(data_dir,'cifar10_labels.txt')
print('Writing labels file, {}'.format(cifar_labels_file))
with open(cifar_labels_file, 'w') as labels_file:
for item in objects:
labels_file.write("{}\n".format(item)) | [
"[email protected]"
] | |
f57e92ca341a4ef719218a9cc8d1a392d5f8ed20 | f14f48e50efb50cfe7078c68f0d61015ae2d646b | /Stock/Select/Ui/Other/DyStockSelectTestedStocksDlg.py | 2b6d429fd49050e93adc805172ad3f4f682f0fbe | [
"MIT"
] | permissive | stockcode/DevilYuan | 17a23da68954714cacae29f428c3005444e0e3a2 | 163d06cb7fd30a8f24b3f2e06206c1fd024353c3 | refs/heads/master | 2020-05-03T14:40:08.420822 | 2019-03-29T13:16:42 | 2019-03-29T13:16:42 | 178,683,886 | 2 | 1 | MIT | 2019-03-31T12:17:49 | 2019-03-31T12:17:49 | null | UTF-8 | Python | false | false | 2,126 | py | from datetime import *
import os
import re
from PyQt5.QtWidgets import QDialog, QGridLayout, QLabel, QTextEdit, QPushButton, QApplication
from DyCommon.DyCommon import *
class DyStockSelectTestedStocksDlg(QDialog):
def __init__(self, data, parent=None):
super().__init__(parent)
self._data = data
self._init()
self._initUi()
def _init(self):
path = DyCommon.createPath('Stock/User/Config/Testing')
self._file = os.path.join(path, 'DyStockSelectTestedStocks.dy')
def _read(self):
if os.path.exists(self._file):
with open(self._file) as f:
codes = f.read()
else:
codes = ""
return codes
def _save(self):
with open(self._file, 'w') as f:
f.write(self._codesTextEdit.toPlainText())
def _initUi(self):
        self.setWindowTitle('Stocks to debug')
        # Widgets
        descriptionLabel = QLabel('Codes of the stocks to debug')
self._codesTextEdit = QTextEdit()
self._codesTextEdit.setPlainText(self._read())
cancelPushButton = QPushButton('Cancel')
okPushButton = QPushButton('OK')
cancelPushButton.clicked.connect(self._cancel)
okPushButton.clicked.connect(self._ok)
        # Layout
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(descriptionLabel, 0, 0)
grid.addWidget(self._codesTextEdit, 1, 0, 20, 10)
grid.addWidget(okPushButton, 1, 11)
grid.addWidget(cancelPushButton, 2, 11)
self.setLayout(grid)
self.resize(QApplication.desktop().size().width()//3, QApplication.desktop().size().height()//2)
def _ok(self):
# save
self._save()
# set out data
codes = re.split(',|\n| ', self._codesTextEdit.toPlainText())
temp = []
for x in codes:
if x and x not in temp: temp.append(x)
codes = [x + '.SH' if x[0] in ['6', '5'] else x + '.SZ' for x in temp]
self._data['codes'] = codes
self.accept()
def _cancel(self):
self.reject()
| [
"[email protected]"
] | |
8b7a7bf8e469f8575e9b8af31593301da9b21c06 | e57af4a840d1126e22363bd9611a40fe18093c92 | /wrappers/s2i/python/test/router-template-app/MyRouter.py | 10aa8ee91177f1fa9145641815baeec07abb43aa | [
"Apache-2.0"
] | permissive | holdenk/seldon-core | ca05e12c8e568700487f9b9e4b79a900200bd03c | de08406883850e5566b4e08af7d6484c1960bdd6 | refs/heads/master | 2020-03-07T12:47:07.530623 | 2018-03-30T10:08:42 | 2018-03-30T10:08:42 | 127,485,585 | 1 | 0 | Apache-2.0 | 2018-03-31T00:37:40 | 2018-03-31T00:37:40 | null | UTF-8 | Python | false | false | 755 | py |
class MyRouter(object):
"""
Router template.
"""
def __init__(self):
"""
Add any initialization parameters. These will be passed at runtime from the graph definition parameters defined in your seldondeployment kubernetes resource manifest.
"""
print("Initializing")
def route(self,features,feature_names):
"""
Route a request.
Parameters
----------
features : array-like
feature_names : array of feature names (optional)
"""
return 0
def send_feedback(self,features,feature_names,routing,reward,truth):
"""
Handle feedback for your routings. Optional.
"""
print("Received feedback")
| [
"[email protected]"
] | |
014442c52df6cfc9302ce48ac5b1ba27eebb94e3 | d9eef8dd3489682c8db41f2311e3058d1f369780 | /.history/abel-network-files/mcmc_alg_implementation_own_two_20180704104642.py | 20737f1c25b68caf620680915be9d652c2c31371 | [] | no_license | McKenzie-Lamb/Gerrymandering | 93fe4a49fe39a0b307ed341e46ba8620ea1225be | b7a7c4129d6b0fcd760ba8952de51eafa701eac3 | refs/heads/master | 2021-01-25T06:06:43.824339 | 2018-10-16T14:27:01 | 2018-10-16T14:27:01 | 93,526,515 | 0 | 0 | null | 2018-07-12T19:07:35 | 2017-06-06T14:17:47 | Python | UTF-8 | Python | false | false | 11,006 | py | # Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import random
import math
import numpy as np
import graph_tool.all as gt
import time
from pathlib import Path
def create_graph_views(district_total_no):
graph_views = list()
for i in range(district_total_no):
graph_view = gt.GraphView(graph)
graph_view_check = graph_view.new_vertex_property("bool")
matched_vertices = gt.find_vertex(graph, district_no, i)
for j in matched_vertices:
graph_view_check[j] = True
graph_view = gt.GraphView(graph_view, vfilt=graph_view_check)
graph_view.vp.valid = graph_view_check
graph_views.append(graph_view)
return graph_views
def turn_off_edges(districts_graphs):
turned_off_graphs = list()
# Iterate through districts and selects random edges
for district in range(len(districts_graphs)):
to_delete = districts_graphs[district].new_edge_property('bool')
edges = districts_graphs[district].get_edges()
selected = edges[np.random.randint(
edges.shape[0], size=len(edges)//2), :] # Here is the prob for edge turn off
for i in selected:
to_delete[i] = True
turned_off_graphs.append(gt.GraphView(
districts_graphs[district], efilt=to_delete))
return turned_off_graphs
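# Note on the filter semantics above: the property is named to_delete, but
# GraphView(efilt=...) keeps edges whose filter value is True, so each district
# retains a random half of its edges. Illustrative example (assumed sizes): a
# district with 10 edges keeps a random sample of 10 // 2 = 5 edges "on".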
def get_cp_boundaries(graph, turned_on_graphs):
cp_boundary = list()
for g in range(len(turned_on_graphs)):
cp_label, hist = gt.label_components(turned_on_graphs[g])
labels = set(cp_label.a)
for l in labels:
cp = gt.find_vertex(turned_on_graphs[g], cp_label, l)
label_boun = 0
for v in cp:
vertex_bound = False
for n in graph.vertex(v).all_neighbors():
for g_two in range(len(turned_on_graphs)):
if g == g_two:
continue
try:
turned_on_graphs[g_two].vertex(n)
except ValueError:
continue
else:
graph.vp.nd[graph.vertex(v)] = g_two
graph.vp.cd[graph.vertex(v)] = g
vertex_bound = True
break
if vertex_bound == True:
label_boun += 1
break
if label_boun == len(cp):
cp_boundary.append(cp)
return cp_boundary
def get_non_adjacent_v(labels_in_boundaries, graph):
list_to_swap = random.sample(
labels_in_boundaries, random.randint(2, len(labels_in_boundaries))) # Prob for choosing from boundaries
index_to_del = list()
for l in range(len(list_to_swap)):
for v in range(len(list_to_swap[l])):
for l_two in range(len(list_to_swap)):
if l == l_two:
continue
for v_two in range(len(list_to_swap[l_two])):
if len(gt.shortest_path(graph, graph.vertex(list_to_swap[l][v]), graph.vertex(list_to_swap[l_two][v_two]))[0]) < 3:
index_to_del.append(l)
    # Build a new list instead of deleting in place, which would shift indices
    list_to_swap = [c for idx, c in enumerate(list_to_swap) if idx not in index_to_del]
    return list_to_swap
def gather_districts_data(districts_graphs):
for i in range(len(districts_graphs)):
population = districts_graphs[i].new_graph_property('int')
districts_graphs[i].graph_properties["pop"] = population
districts_graphs[i].graph_properties["pop"] = 0
dem_vote = districts_graphs[i].new_graph_property('int')
districts_graphs[i].graph_properties["dem_vote"] = dem_vote
districts_graphs[i].graph_properties["dem_vote"] = 0
rep_vote = districts_graphs[i].new_graph_property('int')
districts_graphs[i].graph_properties["rep_vote"] = rep_vote
districts_graphs[i].graph_properties["rep_vote"] = 0
for v in districts_graphs[i].vertices():
districts_graphs[i].graph_properties["pop"] += graph.vp.data[v]["PERSONS"]
districts_graphs[i].graph_properties["dem_vote"] += graph.vp.data[v]["CONDEM14"]
districts_graphs[i].graph_properties["rep_vote"] += graph.vp.data[v]["CONREP14"]
return districts_graphs
def random_color():
r = random.randint(0, 256)
g = random.randint(0, 256)
b = random.randint(0, 256)
a = 1
color_to_return = [r, g, b, a]
index_to_zero = random.randint(0, 3)
color_to_return[index_to_zero] = 0
return color_to_return
def adjust_color(districts_graphs, color, ring_color, niter_type = 'first', ring_colors_dict = None):
if niter_type == 'nonfirst':
for i in range(len(districts_graphs)):
if districts_graphs[i].graph_properties["dem_vote"] > districts_graphs[i].graph_properties["rep_vote"]:
color_ = (0, 0, 255, 1)
else:
color_ = (255, 0, 0, 1)
for v in districts_graphs[i].vertices():
color[v] = color_
ring_color[v] = ring_colors_dict[i]
return color, ring_color
else:
ring_colors_dict = dict()
for i in range(len(districts_graphs)):
ring_colors_dict[i] = random_color().copy()
if districts_graphs[i].graph_properties["dem_vote"] > districts_graphs[i].graph_properties["rep_vote"]:
color_ = (0, 0, 255, 1)
else:
color_ = (255, 0, 0, 1)
for v in districts_graphs[i].vertices():
color[v] = color_
ring_color[v] = ring_colors_dict[i]
return color, ring_color, ring_colors_dict
def propose_swap(districts_graphs, proposed_components, graph, labels_in_boundaries):
changes = dict()
vertex_to_add = dict()
vertex_to_delete = dict()
for i in range(len(districts_graphs)):
changes[i] = [districts_graphs[i].graph_properties["pop"],
districts_graphs[i].graph_properties["rep_vote"],
districts_graphs[i].graph_properties["dem_vote"]]
vertex_to_add[i] = []
vertex_to_delete[i] = []
for c in proposed_components:
added_pop = 0
added_rep = 0
added_dem = 0
n_dindex = 0
c_dindex = 0
for v in range(len(c)):
added_pop += graph.vp.data[c[v]]['PERSONS']
added_rep += graph.vp.data[c[v]]['CONREP14']
added_dem += graph.vp.data[c[v]]['CONDEM14']
n_dindex = graph.vp.nd[c[v]]
c_dindex = graph.vp.cd[c[v]]
vertex_to_add[n_dindex].append(c[v])
vertex_to_delete[c_dindex].append(c[v])
changes[n_dindex][0] += added_pop
changes[n_dindex][1] += added_rep
changes[n_dindex][2] += added_dem
changes[c_dindex][0] -= added_pop
changes[c_dindex][1] -= added_rep
changes[c_dindex][2] -= added_dem
similar_pop = True
for i in changes.keys():
if i == 0:
continue
similar_pop = math.isclose(changes[i][0], changes[i-1][0], rel_tol=0.50) # Here is the population difference
if similar_pop == True:
contiguos = True
for i in changes.keys():
previous_state = districts_graphs[i].vp.valid.copy()
districts_graphs[i].graph_properties["pop"] = changes[i][0]
districts_graphs[i].graph_properties["rep_vote"] = changes[i][1]
districts_graphs[i].graph_properties["dem_vote"] = changes[i][2]
for j in vertex_to_add[i]:
if len(vertex_to_add[i]) == 0:
break
districts_graphs[i].vp.valid[j] = True
for j in vertex_to_delete[i]:
if len(vertex_to_delete[i]) == 0:
break
districts_graphs[i].vp.valid[j] = False
comp, hist = gt.label_components(districts_graphs[i])
if np.sum(comp.a) != 0:
                contiguos = False
if contiguos == True:
return districts_graphs
else:
selected_vertices = get_non_adjacent_v(labels_in_boundaries, graph)
propose_swap(districts_graphs, selected_vertices, graph, labels_in_boundaries)
else:
selected_vertices = get_non_adjacent_v(labels_in_boundaries, graph)
propose_swap(districts_graphs, selected_vertices, graph, labels_in_boundaries)
# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previous created Graph and creating the prop maps
graph = gt.load_graph(str(data_folder / "tmp_graph.gt"))
color = graph.new_vertex_property("vector<double>")
ring_color = graph.new_vertex_property("vector<double>")
cp_label = graph.new_vertex_property("int")
neighbor_district = graph.new_vertex_property('int')
current_district = graph.new_vertex_property('int')
graph.vp.nd = neighbor_district
graph.vp.cd = current_district
# Init variables
district_total_no = 2
# Separates graph into blocks
districts = gt.minimize_blockmodel_dl(
graph, district_total_no, district_total_no)
district_no = districts.get_blocks()
# Create the different graphs
districts_graphs = create_graph_views(district_total_no)
# Initialize data and draw first image
districts_graphs = gather_districts_data(districts_graphs)
color, ring_color, ring_colors_dict = adjust_color(districts_graphs, color, ring_color)
gt.graph_draw(graph, vertex_fill_color = color, vertex_color = ring_color,
output = str(main_folder / 'tmp.png'), bg_color=(255, 255, 255, 1), pos=graph.vp.pos)
# Actual function calling part of algorithm
for i in range(10):
turned_on_graphs = turn_off_edges(districts_graphs)
labels_in_boundaries = get_cp_boundaries(graph, turned_on_graphs)
selected_vertices = get_non_adjacent_v(labels_in_boundaries, graph)
districts_graphs = propose_swap(districts_graphs, selected_vertices, graph, labels_in_boundaries)
start_t = time.time()
for j in districts_graphs:
comp, hist = gt.label_components(j)
if sum(comp.a) == 0:
print(True)
else:
print(False)
end_t = time.time()
print(end_t - start_t)
print('-------------')
color, ring_color = adjust_color(districts_graphs, color, ring_color, niter_type = 'nonfirst', ring_colors_dict = ring_colors_dict)
gt.graph_draw(graph, vertex_fill_color = color, vertex_color = ring_color,
output = str(main_folder / ('tmp'+str(i)+'.png')), bg_color=(255, 255, 255, 1), pos=graph.vp.pos)
| [
"[email protected]"
] | |
bbf67b6d8b12d59dd77937c8c5df126c1dd64d82 | ba198225eceb33df214400c8a19a46292c051fe2 | /conf/settings/settings_production.py | 3e2f6aff61ff45d7a83f5a749ef78ecc26392a48 | [] | no_license | puntonim/inspire-read-api | 64e39261b41c6610f8775645eab18252bbeeb6c2 | 876a053ae5ad2642911bab5387ed6139bc9b09ec | refs/heads/master | 2020-07-11T12:36:41.358661 | 2019-04-04T12:22:38 | 2019-08-26T19:07:21 | 204,540,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | """
Production settings.
"""
from .settings_base import *
DEBUG = False
ALLOWED_HOSTS = ['mysite.com']
SECRET_KEY = 'mysecretkey'
ORCID_TOKENS_ENCRYPTION_KEY = 'mykey' | [
"[email protected]"
] | |
a5af53f0ae3c8b6d8628a1c09137e61e88a8ea9d | 156b77dc620d47fa76baf9361b4ccac04a7f7995 | /FSND/projects/02_trivia_api/starter/backend/test_flaskr.py | 5adf46e7e178d309d8bf8069f3ee65a3d2663468 | [] | no_license | zyfsju/fullstack-study | cc43b18a47bdf277be3828d351624063d965723a | 1172d62689e8edf60636d548cfc89c5168296d9c | refs/heads/master | 2023-03-25T03:03:04.021634 | 2021-03-22T17:56:04 | 2021-03-22T17:56:04 | 350,438,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category
class TriviaTestCase(unittest.TestCase):
"""This class represents the trivia test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_name = "trivia_test"
self.database_path = "postgres://{}/{}".format(
"localhost:5432", self.database_name
)
self.new_question = {
"question": "How far is the mooon away from the earth?",
"answer": "238,900 mi",
"category": 3,
"difficulty": 4,
}
setup_db(self.app, self.database_path)
# binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
def tearDown(self):
"""Executed after reach test"""
pass
"""
TODO
Write at least one test for each test for successful operation and for expected errors.
"""
def test_get_categories(self):
res = self.client().get("/categories")
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(len(data["categories"]))
def test_get_questions_success(self):
res = self.client().get("/questions?page=1")
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(len(data["questions"]))
def test_get_questions_404(self):
res = self.client().get("/questions?page=30")
self.assertEqual(res.status_code, 404)
def test_delete_question_success(self):
res = self.client().delete("/questions/9")
self.assertEqual(res.status_code, 200)
def test_delete_question_404(self):
res = self.client().delete("/questions/100")
self.assertEqual(res.status_code, 404)
def test_post_question_search(self):
res = self.client().post("/questions", json={"searchTerm": "box"})
self.assertEqual(res.status_code, 200)
def test_post_question_insert(self):
res = self.client().post("/questions", json=self.new_question)
self.assertEqual(res.status_code, 200)
def test_post_question_422(self):
res = self.client().post("/questions", json={"test": True})
self.assertEqual(res.status_code, 422)
def test_post_question_400(self):
res = self.client().post("/questions", json={})
self.assertEqual(res.status_code, 400)
def test_get_questions_by_category(self):
res = self.client().get("/categories/2/questions")
self.assertEqual(res.status_code, 200)
def test_get_questions_by_category_404(self):
res = self.client().get("/categories/tt/questions")
self.assertEqual(res.status_code, 404)
def test_get_quiz_question(self):
res = self.client().post(
"/quizzes",
json={
"previous_questions": [],
"quiz_category": {"type": "History", "id": "4"},
},
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue("question" in data.keys())
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
5d2db857d942cc972b50c1533898446b0879cf70 | 53dee1a74618e5ac343c5abe0a7a46fdf287e51e | /modules/callbacks.py | 24e779a197aeef08e887e7541a2d795fa423a938 | [] | no_license | SpencerRaw/GNCA | 250e8bc0351d121ba4c752ad07d02975f9e15e17 | f82f24925acb26c141ed02b0b0fe23ebb0878450 | refs/heads/master | 2023-08-29T10:27:08.590713 | 2021-11-08T07:52:12 | 2021-11-08T07:52:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | import numpy as np
from tensorflow.keras.callbacks import Callback
from boids.evaluate_boids import evaluate_complexity
from boids.forward import forward
class ComplexityCallback(Callback):
def __init__(
self, test_every=10, n_trajectories=1, trajectory_len=1000, n_boids=100
):
super().__init__()
self.test_every = test_every
self.n_trajectories = n_trajectories
self.trajectory_len = trajectory_len
self.n_boids = n_boids
self.complexities = []
def on_epoch_begin(self, epoch, logs=None):
if self.test_every > 0 and epoch == 0:
self.evaluate_complexity()
def on_epoch_end(self, epoch, logs=None):
if self.test_every > 0 and epoch > 0 and epoch % self.test_every == 0:
self.evaluate_complexity()
def on_train_end(self, logs=None):
if self.test_every > 0:
self.complexities = np.array(self.complexities)
np.savez("complexities.npz", complexities=self.complexities)
def evaluate_complexity(self):
out = evaluate_complexity(
self.model,
forward,
self.n_trajectories,
self.trajectory_len,
self.n_boids,
)
self.complexities.append(out)
| [
"[email protected]"
] | |
bc041e4c5efd26ad99e6caa4c17e001f6f83401a | b5daf9d5525971be607e87e140c6e7575ac4a9e2 | /service-mgmt-api/sm-api/sm_api/openstack/common/cliutils.py | 4e37ff8fcfa5fda508427ff57b265d4cf6653e80 | [
"Apache-2.0"
] | permissive | starlingx/ha | 9befeac4e934a22fdeadfc35163feb37967189e0 | e35510e1cc54e83a158af9a5da3fb75ed4dd8601 | refs/heads/master | 2023-08-16T18:36:29.902446 | 2023-08-09T14:31:50 | 2023-08-10T14:52:02 | 237,515,014 | 3 | 3 | Apache-2.0 | 2021-01-14T02:49:45 | 2020-01-31T20:58:31 | C | UTF-8 | Python | false | false | 2,020 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
import inspect
class MissingArgs(Exception):
def __init__(self, missing):
self.missing = missing
def __str__(self):
if len(self.missing) == 1:
return "An argument is missing"
else:
return ("%(num)d arguments are missing" %
dict(num=len(self.missing)))
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
MissingArgs: An argument is missing
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
MissingArgs: 2 arguments are missing
:param fn: the function to check
:param arg: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, 'im_self', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)
| [
"[email protected]"
] | |
61bffabbd45a33440250f55fb45cacc40dd6a16c | 19a32440205b2caeec67c73c10d917b5fb30a86a | /test/test_cloud_account_create_params.py | 729711d8628bae2fcbff63c14301d27e5882ccab | [
"MIT",
"Apache-2.0"
] | permissive | marrotte/isilon_sdk_python | 480e84312f5924a506aeb09c9c7cae79a2b9b7f4 | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | refs/heads/master | 2020-03-23T07:31:40.376316 | 2016-06-07T23:44:31 | 2016-06-07T23:44:31 | 141,277,076 | 1 | 0 | MIT | 2018-07-17T11:02:08 | 2018-07-17T11:02:08 | null | UTF-8 | Python | false | false | 1,331 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.cloud_account_create_params import CloudAccountCreateParams
class TestCloudAccountCreateParams(unittest.TestCase):
""" CloudAccountCreateParams unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCloudAccountCreateParams(self):
"""
Test CloudAccountCreateParams
"""
model = swagger_client.models.cloud_account_create_params.CloudAccountCreateParams()
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
0cb3fb9fc0bf5c436f79abb096e9c3c3d73b02bb | 39d4504ec1da8975fac526d6801b94f4348b6b61 | /research/object_detection/core/keypoint_ops.py | 4e34d34eccf3f3b0da87fb24dd13973323dabd23 | [
"Apache-2.0"
] | permissive | vincentcheny/models | fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad | afb1a59fc1bc792ac72d1a3e22e2469020529788 | refs/heads/master | 2020-07-23T21:38:24.559521 | 2019-11-15T07:50:11 | 2019-11-15T07:50:11 | 207,712,649 | 1 | 0 | Apache-2.0 | 2019-09-11T03:12:31 | 2019-09-11T03:12:31 | null | UTF-8 | Python | false | false | 11,522 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keypoint operations.
Keypoints are represented as tensors of shape [num_instances, num_keypoints, 2],
where the last dimension holds rank 2 tensors of the form [y, x] representing
the coordinates of the keypoint.
"""
import numpy as np
import tensorflow as tf
def scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
def clip_to_window(keypoints, window, scope=None):
"""Clips keypoints to a window.
This op clips any input keypoints to a window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip the keypoints.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'ClipToWindow'):
y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
new_keypoints = tf.concat([y, x], 2)
return new_keypoints
def prune_outside_window(keypoints, window, scope=None):
"""Prunes keypoints that fall outside a given window.
This function replaces keypoints that fall outside the given window with nan.
See also clip_to_window which clips any keypoints that fall outside the given
window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window outside of which the op should prune the keypoints.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'PruneOutsideWindow'):
y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
valid_indices = tf.logical_and(
tf.logical_and(y >= win_y_min, y <= win_y_max),
tf.logical_and(x >= win_x_min, x <= win_x_max))
new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
new_keypoints = tf.concat([new_y, new_x], 2)
return new_keypoints
def change_coordinate_frame(keypoints, window, scope=None):
"""Changes coordinate frame of the keypoints to be relative to window's frame.
Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
coordinates from keypoints of shape [num_instances, num_keypoints, 2]
to be relative to this window.
An example use case is data augmentation: where we are given groundtruth
keypoints and would like to randomly crop the image to some window. In this
case we need to change the coordinate frame of each groundtruth keypoint to be
relative to this new window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window we should change the coordinate frame to.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height,
1.0 / win_width)
return new_keypoints
def to_normalized_coordinates(keypoints, height, width,
check_range=True, scope=None):
"""Converts absolute keypoint coordinates to normalized coordinates in [0, 1].
Usually one uses the dynamic shape of the image or conv-layer tensor:
keypoints = keypoint_ops.to_normalized_coordinates(keypoints,
tf.shape(images)[1],
tf.shape(images)[2]),
This function raises an assertion failed error at graph execution time when
the maximum coordinate is smaller than 1.01 (which means that coordinates are
already normalized). The value 1.01 is to deal with small rounding errors.
Args:
keypoints: A tensor of shape [num_instances, num_keypoints, 2].
height: Maximum value for y coordinate of absolute keypoint coordinates.
width: Maximum value for x coordinate of absolute keypoint coordinates.
check_range: If True, checks if the coordinates are normalized.
scope: name scope.
Returns:
tensor of shape [num_instances, num_keypoints, 2] with normalized
coordinates in [0, 1].
"""
with tf.name_scope(scope, 'ToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(keypoints)
max_assert = tf.Assert(tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(keypoints, 1.0 / height, 1.0 / width)
def to_absolute_coordinates(keypoints, height, width,
check_range=True, scope=None):
"""Converts normalized keypoint coordinates to absolute pixel coordinates.
This function raises an assertion failed error when the maximum keypoint
coordinate value is larger than 1.01 (in which case coordinates are already
absolute).
Args:
keypoints: A tensor of shape [num_instances, num_keypoints, 2]
height: Maximum value for y coordinate of absolute keypoint coordinates.
width: Maximum value for x coordinate of absolute keypoint coordinates.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates
in terms of the image size.
"""
with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Ensure range of input keypoints is correct.
if check_range:
max_val = tf.reduce_max(keypoints)
max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
['maximum keypoint coordinate value is larger '
'than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(keypoints, height, width)
def flip_horizontal(keypoints, flip_point, flip_permutation, scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'FlipHorizontal'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def flip_vertical(keypoints, flip_point, flip_permutation, scope=None):
"""Flips the keypoints vertically around the flip_point.
This operation flips the y coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the y coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'FlipVertical'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
v = flip_point * 2.0 - v
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def rot90(keypoints, scope=None):
"""Rotates the keypoints counter-clockwise by 90 degrees.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Rot90'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2)
v = 1.0 - v
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
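# Minimal usage sketch (illustrative values, not part of the original module):
#   keypoints = tf.constant([[[0.2, 0.3], [0.2, 0.7]]])  # [1 instance, 2 kpts, 2]
#   flipped = flip_horizontal(keypoints, 0.5, [1, 0])    # mirror and swap the pair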
| [
"[email protected]"
] | |
f747c7fc30189608f2665557c00fa27eb5312c27 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_092/ch68_2020_05_04_19_40_02_713532.py | 94edce737f60b07ea6de9d86db9b8fbf6ef19a20 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | def separa_trios(x):
if len(x)%3 == 0:
i = 0
L1 = []
L2 = []
while i < len(x):
L1.append(x[i])
L1.append(x[i + 1])
L1.append(x[i + 2])
L2.append(L1)
L1 = []
i += 3
return L2
elif len(x)%3 == 2:
c = 0
L3 = []
L4 = []
while(c < (len(x)-2)):
L3.append(x[c])
L3.append(x[c + 1])
L3.append(x[c + 2])
L4.append(L3)
L3 = []
c += 3
        L3.append(x[len(x)-2])
        L3.append(x[len(x)-1])
L4.append(L3)
return L4
else:
j = 0
L5 = []
L6 = []
while(j < (len(x)-1)):
L5.append(x[j])
L5.append(x[j + 1])
L5.append(x[j + 2])
L6.append(L5)
L5 = []
j += 3
        L5.append(x[len(x)-1])
L6.append(L5)
return L6 | [
"[email protected]"
] | |
ddff7707ddf45d323fdbddc7a9da8e6465da5e1d | 491c1e520a64e3ebd5349130f35047aaed1e70ec | /contest/maxAscendingSum.py | 854c5fb405b93f90d906324748c19480c1d1e40d | [] | no_license | pangyouzhen/data-structure | 33a7bd7790c8db3e018114d85a137f5f3d6b92f8 | cd46cf08a580c418cc40a68bf9b32371fc69a803 | refs/heads/master | 2023-05-26T12:02:30.800301 | 2023-05-21T08:07:57 | 2023-05-21T08:07:57 | 189,315,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | class Solution(object):
    # What if this problem asked for an ascending subsequence instead of a contiguous subarray?
def maxAscendingSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
dp = [0] * len(nums)
dp[0] = nums[0]
for i in range(1, len(nums)):
if nums[i] > nums[i - 1]:
dp[i] = dp[i - 1] + nums[i]
else:
dp[i] = nums[i]
return max(dp)
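# Worked example of the dp recurrence above: for nums = [10, 20, 30, 5, 10, 50],
# dp becomes [10, 30, 60, 5, 15, 65], so the maximum ascending subarray sum is 65.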
if __name__ == '__main__':
# nums = [10, 30, 60, 60, 60, 65]
nums = [10, 20, 30, 5, 10, 50]
sol = Solution()
print(sol.maxAscendingSum(nums))
| [
"[email protected]"
] | |
0c811292eabe29cef6cd91ec494fede6e354542b | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/1d3d6a3e2d2c4b7e50ebc0f191430bfbef4fd40a-<main>-fix.py | 93eb6b2dcb9e8a30f58802955767945e9a6d77f4 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,476 | py |
def main():
module = AnsibleModule(argument_spec=dict(path=dict(type='list', required=True), format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), dest=dict(type='path'), exclude_path=dict(type='list'), remove=dict(type='bool', default=False)), add_file_common_args=True, supports_check_mode=True)
params = module.params
check_mode = module.check_mode
paths = params['path']
dest = params['dest']
exclude_paths = params['exclude_path']
remove = params['remove']
expanded_paths = []
expanded_exclude_paths = []
format = params['format']
globby = False
changed = False
state = 'absent'
archive = False
successes = []
if ((not HAS_LZMA) and (format == 'xz')):
module.fail_json(msg='lzma or backports.lzma is required when using xz format.')
for path in paths:
path = os.path.expanduser(os.path.expandvars(path))
if (('*' in path) or ('?' in path)):
expanded_paths = (expanded_paths + glob.glob(path))
globby = True
else:
expanded_paths.append(path)
if exclude_paths:
for exclude_path in exclude_paths:
exclude_path = os.path.expanduser(os.path.expandvars(exclude_path))
if (('*' in exclude_path) or ('?' in exclude_path)):
expanded_exclude_paths = (expanded_exclude_paths + glob.glob(exclude_path))
else:
expanded_exclude_paths.append(exclude_path)
if (not expanded_paths):
return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found')
archive = (globby or os.path.isdir(expanded_paths[0]) or (len(expanded_paths) > 1))
if ((not dest) and (not archive)):
dest = ('%s.%s' % (expanded_paths[0], format))
if (archive and (not dest)):
module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')
archive_paths = []
missing = []
arcroot = ''
for path in expanded_paths:
if (arcroot == ''):
arcroot = (os.path.dirname(path) + os.sep)
else:
for i in range(len(arcroot)):
if (path[i] != arcroot[i]):
break
if (i < len(arcroot)):
arcroot = os.path.dirname(arcroot[0:(i + 1)])
arcroot += os.sep
if (remove and os.path.isdir(path)):
path_dir = path
if (path[(- 1)] != '/'):
path_dir += '/'
if dest.startswith(path_dir):
module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True')
if (os.path.lexists(path) and (path not in expanded_exclude_paths)):
archive_paths.append(path)
else:
missing.append(path)
if ((len(missing) == len(expanded_paths)) and dest and os.path.exists(dest)):
if re.search('(\\.tar|\\.tar\\.gz|\\.tgz|\\.tbz2|\\.tar\\.bz2|\\.tar\\.xz|\\.zip)$', os.path.basename(dest), re.IGNORECASE):
state = 'archive'
else:
state = 'compress'
elif archive:
if (not archive_paths):
if os.path.lexists(dest):
state = 'archive'
elif missing:
state = 'incomplete'
archive = None
size = 0
errors = []
if os.path.lexists(dest):
size = os.path.getsize(dest)
if (state != 'archive'):
if check_mode:
changed = True
else:
try:
if (format == 'zip'):
arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True)
elif ((format == 'gz') or (format == 'bz2')):
arcfile = tarfile.open(dest, ('w|' + format))
elif (format == 'xz'):
arcfileIO = io.BytesIO()
arcfile = tarfile.open(fileobj=arcfileIO, mode='w')
elif (format == 'tar'):
arcfile = tarfile.open(dest, 'w')
match_root = re.compile(('^%s' % re.escape(arcroot)))
for path in archive_paths:
if os.path.isdir(path):
for (dirpath, dirnames, filenames) in os.walk(path, topdown=True):
if (not dirpath.endswith(os.sep)):
dirpath += os.sep
for dirname in dirnames:
fullpath = (dirpath + dirname)
arcname = match_root.sub('', fullpath)
try:
if (format == 'zip'):
arcfile.write(fullpath, arcname)
else:
arcfile.add(fullpath, arcname, recursive=False)
except Exception as e:
errors.append(('%s: %s' % (fullpath, to_native(e))))
for filename in filenames:
fullpath = (dirpath + filename)
arcname = match_root.sub('', fullpath)
if (not filecmp.cmp(fullpath, dest)):
try:
if (format == 'zip'):
arcfile.write(fullpath, arcname)
else:
arcfile.add(fullpath, arcname, recursive=False)
successes.append(fullpath)
except Exception as e:
errors.append(('Adding %s: %s' % (path, to_native(e))))
else:
if (format == 'zip'):
arcfile.write(path, match_root.sub('', path))
else:
arcfile.add(path, match_root.sub('', path), recursive=False)
successes.append(path)
except Exception as e:
module.fail_json(msg=('Error when writing %s archive at %s: %s' % ((((format == 'zip') and 'zip') or ('tar.' + format)), dest, to_native(e))), exception=format_exc())
if arcfile:
arcfile.close()
state = 'archive'
if (format == 'xz'):
with lzma.open(dest, 'wb') as f:
f.write(arcfileIO.getvalue())
arcfileIO.close()
if errors:
module.fail_json(msg=('Errors when writing archive at %s: %s' % (dest, '; '.join(errors))))
if ((state in ['archive', 'incomplete']) and remove):
for path in successes:
try:
if os.path.isdir(path):
shutil.rmtree(path)
elif (not check_mode):
os.remove(path)
except OSError as e:
errors.append(path)
if errors:
            module.fail_json(dest=dest, msg=('Error deleting some source files: %s' % ', '.join(errors)), files=errors)
if ((not check_mode) and (os.path.getsize(dest) != size)):
changed = True
if (successes and (state != 'incomplete')):
state = 'archive'
else:
path = expanded_paths[0]
if (not (os.path.exists(path) or os.path.lexists(dest))):
state = 'absent'
elif ((not os.path.lexists(path)) and os.path.lexists(dest)):
state = 'compress'
else:
if module.check_mode:
if (not os.path.exists(dest)):
changed = True
else:
size = 0
f_in = f_out = arcfile = None
if os.path.lexists(dest):
size = os.path.getsize(dest)
try:
if (format == 'zip'):
arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True)
arcfile.write(path, path[len(arcroot):])
arcfile.close()
state = 'archive'
elif (format == 'tar'):
arcfile = tarfile.open(dest, 'w')
arcfile.add(path)
arcfile.close()
else:
f_in = open(path, 'rb')
if (format == 'gz'):
f_out = gzip.open(dest, 'wb')
elif (format == 'bz2'):
f_out = bz2.BZ2File(dest, 'wb')
elif (format == 'xz'):
f_out = lzma.LZMAFile(dest, 'wb')
else:
raise OSError('Invalid format')
shutil.copyfileobj(f_in, f_out)
successes.append(path)
except OSError as e:
module.fail_json(path=path, dest=dest, msg=('Unable to write to compressed file: %s' % to_native(e)), exception=format_exc())
if arcfile:
arcfile.close()
if f_in:
f_in.close()
if f_out:
f_out.close()
if (os.path.getsize(dest) != size):
changed = True
state = 'compress'
if (remove and (not check_mode)):
try:
os.remove(path)
except OSError as e:
module.fail_json(path=path, msg=('Unable to remove source file: %s' % to_native(e)), exception=format_exc())
params['path'] = dest
file_args = module.load_file_common_arguments(params)
if (not check_mode):
changed = module.set_fs_attributes_if_different(file_args, changed)
module.exit_json(archived=successes, dest=dest, changed=changed, state=state, arcroot=arcroot, missing=missing, expanded_paths=expanded_paths, expanded_exclude_paths=expanded_exclude_paths)
| [
"[email protected]"
] | |
b91c102509d6cfc53f4dfb4a2f6fad0aa1418164 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_encouraging.py | 80132252a0c761c502ceab036d3d7ee9fc288904 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py |
#calss header
class _ENCOURAGING():
def __init__(self,):
self.name = "ENCOURAGING"
self.definitions = [u'making you feel more confidence or hope: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
66e2b39f73d65ebeac8acb7ee4270d2bf488b601 | 5c37dfaac3a02cc592c154bd9ed1a3fbcf5855cd | /python/cron/numpy.py | 6a5307cbf874d543abfc54dfb176cdaf496672e7 | [] | no_license | thejohnfreeman/cron | 5a91884e5ce8448e6f16b4d0823a05cc429ee37f | c2dbbb50ea8852e0f947b075e9e2f663a450fdcb | refs/heads/master | 2020-12-29T00:41:24.952795 | 2016-03-30T02:29:48 | 2016-03-30T02:30:47 | 55,026,110 | 0 | 0 | null | 2016-03-30T02:31:40 | 2016-03-30T02:31:40 | null | UTF-8 | Python | false | false | 751 | py | """
Numpy support for Cron.
Numpy support is provided in a submodule so that users who do not need it aren't
forced to import numpy.
After this module is imported, Cron types that can be used as numpy array types
will have a `dtype` attribute. For example:
>>> import numpy as np
>>> from cron import Date
>>> import cron.numpy
>>> array = np.zeroes(8, dtype=Date.dtype)
"""
#-------------------------------------------------------------------------------
import numpy
from .ext import set_up_numpy as _set_up_numpy
# Add all the numpy stuff to the extension module.
_set_up_numpy()
# FIXME: Should we put this all in a submodule?
from .ext import get_day, get_month, get_year, get_ymd, get_ymdi
from .ext import date_from_ymdi
| [
"[email protected]"
] | |
1057b2013d6364200744efd5a4918a410b79b9bf | 03a70d422855fcf2b488c5070c5ef12001143230 | /virtual/bin/django-admin | c98dbb340f1fcdb0df6f4f85e1453a849a12ae31 | [
"MIT"
] | permissive | bellahOchola/Rater | 0b5f1004552a8d966bff4092bd242834cd68564e | 0251fdc6886eb1bcbad82a7aefc61e0ecf6a0738 | refs/heads/master | 2022-12-11T01:21:39.542482 | 2020-01-17T00:33:18 | 2020-01-17T00:33:18 | 232,781,927 | 0 | 0 | null | 2022-12-08T03:26:23 | 2020-01-09T10:22:29 | Python | UTF-8 | Python | false | false | 309 | #!/home/moringa/Desktop/projects/Django/rater/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
1e30765ae403bdd8a4a0a002c1a6768b0b8fafa0 | 269f18999a158db1a8a736655b84704780439cab | /shots/migrations/0009_remove_image_tags.py | cf21e8bde8d3c97021b08bf3ed43d7db2113b507 | [] | no_license | raul-jr3/fantastic-spoon | 6c6d1a323f4d3ff1dc157c8bfd63e0cd8da41ffd | 557cb3e85d2c385b080564bdf2571bd09c3b6655 | refs/heads/master | 2021-01-02T08:37:20.087398 | 2017-09-14T09:05:13 | 2017-09-14T09:05:13 | 99,033,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-13 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shots', '0008_image_tags'),
]
operations = [
migrations.RemoveField(
model_name='image',
name='tags',
),
]
| [
"[email protected]"
] | |
c964f1ce9a0d56b63720d461b59ad92d73476c3b | 9192a0bf95b54fcbe76573cc250e590d64328422 | /ibanking_web/ibanking/views.py | 5e90f4bacf087dda72646b04d0de445acae9a6d1 | [] | no_license | ihfazhillah/explore-scrapy | ca07b31ce4c285bef1e1a7e0f5d97da1e3018217 | 74751f687062b47e56a1466836306b84724f094e | refs/heads/master | 2022-12-10T03:00:54.083201 | 2019-02-15T00:41:38 | 2019-02-15T00:41:38 | 166,724,277 | 0 | 0 | null | 2022-12-08T01:37:37 | 2019-01-21T00:22:30 | Python | UTF-8 | Python | false | false | 1,001 | py | from uuid import uuid4
from django.shortcuts import render
from django.http import JsonResponse
from scrapyd_api import ScrapydAPI
from .models import ScrapyItem
# Create your views here.
scrapyd = ScrapydAPI('http://localhost:6800')
def get_statements(request):
unique_id = str(uuid4())
settings = {
'unique_id': unique_id
}
task = scrapyd.schedule('default', 'ibmandiri', settings=settings, to_crawl='otjlsjflask')
return JsonResponse({
'task_id': task,
'unique_id': unique_id,
'status': 'started',
'url': '/check_job?unique_id={}&task_id={}'.format(unique_id, task)
})
def check_job_get_statements(request):
task_id = request.GET.get('task_id')
unique_id = request.GET.get('unique_id')
status = scrapyd.job_status('default', task_id)
if status == 'finished':
item = ScrapyItem.objects.get(unique_id=unique_id)
return JsonResponse(item.to_dict())
return JsonResponse({'status': status})
| [
"[email protected]"
] | |
b5f279e0b892ad03556863e8883bdf0635bb56f0 | 4b86ebac6e2273bec07e8f0f1275a9f4c4700491 | /Sort/2947_나무조각.py | b14eb7a5c7ba76ac4a95e165b8923bbaf58b19a0 | [] | no_license | chohan3036/algo_study | 999d8a9d44b27100009246dcf913e07f36787295 | 64abbc8a401f9e555692f01917eb78b0fd37d7fb | refs/heads/master | 2023-04-07T06:13:16.059638 | 2021-04-20T04:32:35 | 2021-04-20T04:32:35 | 307,859,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | import sys
reading = lambda: sys.stdin.readline().strip()
N = list(map(int, reading().split()))
n = 5
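# naive bubble sort over the five values, printing the array after each outer pass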
for i in range(n):
for j in range(1, n):
if N[j - 1] > N[j]:
temp = N[j - 1]
N[j - 1] = N[j]
N[j] = temp
for k in N:
print(k, end=' ')
print() | [
"[email protected]"
] | |
0ab887523c36a9a3a0f880417a77e0c20e147a6d | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/6a8e7087831193afbc3e1799460614506743077b-<stack>-bug.py | 01dc6fbc3cc25cc1e58e588b0c339369943c2826 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,170 | py | def stack(self, level=(- 1), dropna=True):
"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n The new index levels are sorted.\n\n Parameters\n ----------\n level : int, str, list, default -1\n Level(s) to stack from the column axis onto the index\n axis, defined as one index or label, or a list of indices\n or labels.\n dropna : bool, default True\n Whether to drop rows in the resulting Frame/Series with\n missing values. Stacking a column level onto the index\n axis can create combinations of index and column values\n that are missing from the original dataframe. See Examples\n section.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being re-organised from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack()\n cat weight 0\n height 1\n dog weight 2\n height 3\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. 
Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack()\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n\n **Prescribing the level(s) to be stacked**\n\n The first parameter controls which level or levels are stacked:\n\n >>> df_multi_level_cols2.stack(0)\n kg m\n cat height NaN 2.0\n weight 1.0 NaN\n dog height NaN 4.0\n weight 3.0 NaN\n >>> df_multi_level_cols2.stack([0, 1])\n cat height m 2.0\n weight kg 1.0\n dog height m 4.0\n weight kg 3.0\n dtype: float64\n\n **Dropping missing values**\n\n >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n Note that rows where all values are missing are dropped by\n default but this behaviour can be controlled via the dropna\n keyword parameter:\n\n >>> df_multi_level_cols3\n weight height\n kg m\n cat NaN 1.0\n dog 2.0 3.0\n >>> df_multi_level_cols3.stack(dropna=False)\n height weight\n cat kg NaN NaN\n m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n >>> df_multi_level_cols3.stack(dropna=True)\n height weight\n cat m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n "
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna) | [
"[email protected]"
] | |
2f07383bfc2b9984072cbf5c3ab0ad171fc409ae | f92fb9b9abe021d5604d3b5fb2ade0fbe6d85e3e | /robot/logging.py | 30ffb6e8cc9edea7f47e51458319b66961d6af58 | [
"MIT"
] | permissive | chuchiring/wukong-robot | a9f4656db45656962614451ebb170b4ca4573871 | f31982738df5b5c3929c713415aee04d13f2f4c8 | refs/heads/master | 2020-04-23T06:45:10.085706 | 2019-02-16T05:51:26 | 2019-02-16T05:51:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | import logging
import sys
import os
from robot import config, constants
from logging.handlers import RotatingFileHandler, HTTPHandler
PAGE = 4096
DEBUG = logging.DEBUG
INFO = logging.INFO
ERROR = logging.ERROR
def tail(filepath, n=10):
"""
    Implements tail -n: return the last n lines of filepath as one string.
"""
res = ""
with open(filepath, 'rb') as f:
f_len = f.seek(0, 2)
rem = f_len % PAGE
page_n = f_len // PAGE
r_len = rem if rem else PAGE
while True:
            # if the read window already covers the whole file, read everything at once
if r_len >= f_len:
f.seek(0)
lines = f.readlines()[::-1]
break
f.seek(-r_len, 2)
# print('f_len: {}, rem: {}, page_n: {}, r_len: {}'.format(f_len, rem, page_n, r_len))
lines = f.readlines()[::-1]
            count = len(lines) - 1  # the last line may be incomplete, so discount it
            if count >= n:  # collected enough lines; stop reading
                break
            else:  # not enough lines yet; widen the window by another page
r_len += PAGE
page_n -= 1
for line in lines[:n][::-1]:
res += line.decode('utf-8')
return res
def getLogger(name):
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(name)
# StreamHandler
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(level=logging.INFO)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
# FileHandler
file_handler = RotatingFileHandler(os.path.join(constants.TEMP_PATH, 'wukong.log'), maxBytes=1024*1024,backupCount=5)
file_handler.setLevel(level=logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def readLog(lines=200):
log_path = os.path.join(constants.TEMP_PATH, 'wukong.log')
if os.path.exists(log_path):
return tail(log_path, lines)
return ''
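# Usage sketch (this module's own API; the logger name is illustrative):
#   logger = getLogger('wukong')
#   logger.info('robot started')
#   print(readLog(50))  # last 50 lines of wukong.log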
| [
"[email protected]"
] | |
7dca2733f4c7e5b4178a1a284fd2614650b0b984 | e522c1606e06cd0a2ea82c57b72aac2e065ad429 | /programming-team/First Semester/UCB/boxes.py | 9d349284cd7c37e4a74902d185a11f42dbe71df4 | [] | no_license | Nate8888/programming-contest-practice | 80d17d7ccea9491f42ec0cccab7e0cfe80849f17 | 4c2496260c209190103d7bbef82d866ae4be09b6 | refs/heads/master | 2023-05-04T16:20:36.136598 | 2021-05-27T22:52:04 | 2021-05-27T22:52:04 | 293,109,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import sys
def k_min_max_partitions(my_list, size, k):
if k == 1:
return sum(my_list[0:size])
if size == 1:
return my_list[0]
current_min_max = 1e27
for current_index in range(1, size + 1):
current_min_max = min(current_min_max, max(k_min_max_partitions(my_list, current_index, k - 1), sum(my_list[current_index:size])))
#print(current_min_max)
return current_min_max
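# Painter's-partition recurrence: the best k-way split of the first `size`
# items minimizes, over each split point, the larger of the best (k-1)-way
# split of the prefix and the sum of the tail. This naive recursion is
# exponential; memoizing on (size, k) would reduce it to roughly O(n^2 * k).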
line = input().rstrip().split(" ")
partitions = int(line[1])
items = int(line[0])
all_nums = input().rstrip().split(" ")
nums = [int(x) for x in all_nums]
print(k_min_max_partitions(nums, items, partitions)) | [
"[email protected]"
] | |
ff3b0522f795040dd1eea2c2af80b0748c4a76eb | 151e4ab8bdcff37ded920f32250331c1edc1772d | /tlmshop/serializers/__init__.py | 87773f17382c4d432810508bc9149e2b62673837 | [] | no_license | LegionMarket/django-cms-base | 79069ee67628ff7d10338b48b154fe087863e1ea | 1b6fc3423e3d0b2165552cc980432befb496f3e0 | refs/heads/master | 2020-12-30T17:10:33.866390 | 2017-05-25T12:49:41 | 2017-05-25T12:49:41 | 91,057,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,797 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.utils.safestring import mark_safe
from rest_framework import serializers
from shop.serializers.bases import ProductSerializer
from shop.search.serializers import ProductSearchSerializer as BaseProductSearchSerializer
from tlmshop.search_indexes import tlmshop_search_index_classes
__all__ = ['ProductSummarySerializer', 'ProductSearchSerializer', 'CatalogSearchSerializer']
class ProductSummarySerializer(ProductSerializer):
media = serializers.SerializerMethodField()
class Meta(ProductSerializer.Meta):
fields = ['id', 'product_name', 'product_url', 'product_model', 'price', 'media', 'caption']
def get_media(self, product):
return self.render_html(product, 'media')
if settings.SHOP_TYPE in ['commodity', 'i18n_commodity']:
class ProductDetailSerializer(ProductSerializer):
class Meta(ProductSerializer.Meta):
fields = ['product_name', 'slug', 'unit_price', 'product_code']
__all__.append('ProductDetailSerializer')
elif settings.SHOP_TYPE in ['smartcard', 'i18n_smartcard']:
class ProductDetailSerializer(ProductSerializer):
class Meta(ProductSerializer.Meta):
fields = ['product_name', 'slug', 'unit_price', 'manufacturer', 'card_type', 'speed',
'product_code', 'storage']
__all__.append('ProductDetailSerializer')
elif settings.SHOP_TYPE in ['i18n_polymorphic', 'polymorphic']:
from .polymorphic import (SmartCardSerializer, SmartPhoneSerializer, AddSmartPhoneToCartSerializer)
__all__.extend(['SmartCardSerializer', 'SmartPhoneSerializer', 'AddSmartPhoneToCartSerializer'])
class ProductSearchSerializer(BaseProductSearchSerializer):
"""
Serializer to search over all products in this shop
"""
media = serializers.SerializerMethodField()
class Meta(BaseProductSearchSerializer.Meta):
fields = BaseProductSearchSerializer.Meta.fields + ['media', 'caption']
field_aliases = {'q': 'text'}
search_fields = ['text']
index_classes = tlmshop_search_index_classes
def get_media(self, search_result):
return mark_safe(search_result.search_media)
class CatalogSearchSerializer(BaseProductSearchSerializer):
"""
Serializer to restrict products in the catalog
"""
media = serializers.SerializerMethodField()
class Meta(BaseProductSearchSerializer.Meta):
fields = BaseProductSearchSerializer.Meta.fields + ['media', 'caption']
field_aliases = {'q': 'autocomplete'}
search_fields = ['autocomplete']
index_classes = tlmshop_search_index_classes
def get_media(self, search_result):
return mark_safe(search_result.catalog_media)
| [
"[email protected]"
] | |
b0c652a0481b0f73ad11a84bb549814dfbbdff09 | da6fe795eec82abddcc2590696e3cdc0cf479445 | /code/repository.py | b2feefb2acca4238608de76597c9ae80f8a55211 | [] | no_license | techknowledgist/tgist-features | d0b37228ea65d22dec08983d259f2de92dbf02a2 | 994b371e68030f462074ea9a9c32a7f3ddc394f4 | refs/heads/master | 2021-03-24T12:59:37.417498 | 2019-05-14T14:09:20 | 2019-05-14T14:09:20 | 114,810,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,635 | py | """repository.py
Code to build, manage and access repositories.
A repository needs to be initialized once. Once you have a repository you can do
several things:
1- adding all files from a corpus (both sources and processed files)
2- adding sources from a list of files
3- querying the repository for data
4- analyzing the repository
The above functionalities are in varying stages of (non) completion. See below
for more details.
USAGE:
$ python repository.py --initialize --repository PATH --type STRING
$ python repository.py --add-corpus --repository PATH --corpus PATH
$ python repository.py --analyze --repository PATH
There are short versions for some of the options: --repository (-r),
--type (-t) and --corpus (-c)
INITIALIZATION
To initialize a repository:
$ python repository.py --initialize --repository test --type patents
The --type argument specifies the repository type, which is one of 'patents',
'cnki' or 'pubmed'. After a repository is initialized it has the following
structure:
type.txt
documents/
logs/
index/
idx-dates.txt
idx-identifiers.txt
idx-files.txt
idx-processed-d1_txt.txt
idx-processed-d2_seg.txt
idx-processed-d2_tag.txt
idx-processed-d3_phr_feats.txt
data/
sources/
processed/
All directories are empty, except for index/, which has a couple of index files
that are all empty (because the repository is still empty). Other directories
may be added to repositories, for example a directory with scripts for local
repository-specific processing or a directory with lists of files. Other index
files may be added over time, some specific to particular repository types. The
type.txt file contains the type of the repository, which is used each time the
repository is opened.
TODO: add an SQLite database that replaces all the index files. The index files
could still be kept around as a kind of journal files.
There are several kinds of repositories, each making different assumptions on
identifiers:
PatentRepository (--type patents)
CnkiRepository (--type cnki)
PubmedRepository (--type pubmed)
A PatentRepository assumes that the basename of a file is a unique file and
stores patents using that uniqueness. It is the only repository type that exists
at this point. When more types come into being a new option will be added.
The index files contain a list of records with the following fields (this is for
patents):
idx-ids.txt:
numerical identifier
idx-files.txt:
timestamp
numerical identifier
size of source file (compressed)
path in repository
idx-dates:
not yet used
idx-processed-X.txt:
timestamp
git commit
numerical id
options (zero or more columns)
ADDING CORPUS DATA
To add all data from a corpus:
$ python repository.py --add-corpus -r test -c data/patents/corpora/sample-us
Existing data will not be overwritten. At some point we may add a flag that
allows you to overwrite existing data.
More corpora can be added later. This needs to be done one corpus at a time;
no concurrency is implemented.
During a corpus load, source files are added to /data/sources and processed
files to data/processed. For patents, the directory strcuture in data/sources is
generated from the filename, for the sample corpus we end up with the following
files in a three-level directory structure:
data/sources/42/365/96/US4236596A.xml.gz
data/sources/42/467/08/US4246708A.xml.gz
data/sources/42/543/95/US4254395A.xml.gz
data/sources/41/927/70/US4192770A.xml.gz
This structure is mirrored under data/processed, with the addition of the name
of the processing step (d1_txt, d2_seg, d2_tag or d3_phr_feats), for example,
for the tagged files we get:
data/processed/d2_tag/42/365/96/US4236596A.xml.gz
data/processed/d2_tag/42/467/08/US4246708A.xml.gz
data/processed/d2_tag/42/543/95/US4254395A.xml.gz
data/processed/d2_tag/41/927/70/US4192770A.xml.gz
Corpus loads also update the index files and add a time-stamped log file to the
logs directory.
USING FROM OTHER SCRIPTS
There are a couple of utility methods intended to be used from other
scripts. These methods convert between paths and identifiers:
filepath2longid(path) returns a long identifier
filepath2shortid(path) returns a short identifier
longid2filepath(longid) returns a path to a source file
longid2filepath(longid, step) returns a path to a processed file
    shortid2filepath(shortid)      returns a path to a source file
shortid2filepath(shortid, step) returns a path to a processed file
longid2shortid(longid) returns a short identifier
Methods that return paths will return None if the path does not exist.
What is returned is actually the path with the .gz extension stripped off. This
is so the result works nicely with utils.path.open_input_file(), which does not
expect the .gz extension.
These methods are now only implemented for PatentRepository, they may or may not
make sense for other repository classes.
Examples:
import repository
repo = repository.open_repository('ln-us')
print repo.filepath2longid('57/238/US5723853A.xml')
print repo.filepath2shortid('57/238/US5723853A.xml')
print repo.longid2filepath('US5723853A.xml')
print repo.longid2filepath('US5723853A.xml', 'd2_tag')
print repo.shortid2filepath('5723853')
print repo.shortid2filepath('5723853', 'd2_tag')
print repo.longid2shortid('US5723853A.xml')
"""
import os, sys, re, time, shutil, getopt, glob
from config import DEFAULT_PIPELINE
from corpus import Corpus
from utils.path import compress, ensure_path, read_only, make_writable
REPOSITORY_DIR = '/home/j/corpuswork/fuse/FUSEData/repositories'
REPOSITORY_TYPES = ('patents', 'pubmed', 'cnki')
re_PATENT = re.compile('^(B|D|H|HD|RE|PP|T)?(\d+)(.*)')
PROCESSING_STEPS = ('d1_txt', 'd2_seg', 'd2_tag', 'd3_phr_feats')
class RepositoryError(Exception):
def __init__(self, value): self.value = value
def __str__(self): return repr(self.value)
def create_repository(repository, repotype="patents"):
"""Creates a generic repository and initializes a skeleton directory
structure on disk. This method is solely intended to create the repository
on disk. The resulting generic repository object is not meant to be used for
further processing and it is therefore not returned."""
if os.path.exists(repository):
exit("WARNING: cannot create repository, directory already exists")
if not repotype in REPOSITORY_TYPES:
exit("WARNING: unkown repository type")
os.makedirs(repository)
repo = Repository(repository)
repo.create_skeleton_on_disk(repotype)
def open_repository(repository):
repository = validate_location(repository)
with open(os.path.join(repository, 'type.txt')) as fh:
repotype = fh.read().strip()
if repotype == 'patents':
return PatentRepository(repository)
elif repotype == 'pubmed':
print "Not yet implemented"
elif repotype == 'cnki':
print "Not yet implemented"
else:
print "Unknown repository type"
class Repository(object):
def __init__(self, dirname):
"""The argument is a relative or absolute path to a repository. Often,
this is a directory insize of REPOSITORY_DIR, which is the standard
location of all repositories."""
dirname = validate_location(dirname)
self.dir = dirname
self.type_file = os.path.join(self.dir, 'type.txt')
self.idx_dir = os.path.join(self.dir, 'index')
self.doc_dir = os.path.join(self.dir, 'documents')
self.log_dir = os.path.join(self.dir, 'logs')
self.data_dir = os.path.join(self.dir, 'data','sources')
self.proc_dir = os.path.join(self.dir, 'data', 'processed')
self.idx_ids = os.path.join(self.idx_dir, 'idx-ids.txt')
self.idx_files = os.path.join(self.idx_dir, 'idx-files.txt')
self.idx_dates = os.path.join(self.idx_dir, 'idx-dates.txt')
def __str__(self):
return "<%s '%s'>" % (self.__class__.__name__, self.dir)
def create_skeleton_on_disk(self, repotype):
"""Initialize directory structure and files on disk. The only file with
content is type.txt which stores the repository type."""
for d in (self.doc_dir, self.log_dir, self.idx_dir,
self.data_dir, self.proc_dir):
os.makedirs(d)
with open(self.type_file, 'w') as fh:
fh.write("%s\n" % repotype)
read_only(self.type_file)
for fname in self._index_files():
open(fname, 'w').close()
read_only(fname)
def _index_files(self):
"""Return a list of all index files."""
fnames = [self.idx_ids, self.idx_files, self.idx_dates]
for step in PROCESSING_STEPS:
fnames.append("%s%sidx-processed-%s.txt" % (self.idx_dir, os.sep, step))
return fnames
def read_identifiers(self):
"""Return a dictionary with as keys all the identifiers in the
repository. This works for now with smaller repositories, but we may
need to start using an SQLite database."""
identifiers = {}
fh = open(self.idx_ids)
for line in fh:
identifiers[line.strip()] = True
return identifiers
def filepath2longid(self, path): raise RepositoryError("not yet implemented")
def filepath2shortid(self, path): raise RepositoryError("not yet implemented")
def longid2filepath(self, id): raise RepositoryError("not yet implemented")
def shortid2filepath(self, id): raise RepositoryError("not yet implemented")
def longid2shortid(self, id): raise RepositoryError("not yet implemented")
def analyze(self):
files = 0
size = 0
for line in open(self.idx_files):
(timestamp, id, filesize, path) = line.rstrip().split("\t")
files += 1
size += int(filesize)
print self
print " %6sMB - source size (compressed)" % (size/1000000)
print " %8s - number of files" % files
def load_index(self):
self.index = RepositoryIndex(self)
def get(self, identifier, step=None):
self.index.get(identifier, step)
class PatentRepository(Repository):
def add_corpus(self, corpus_path, language='en', datasource='ln',
limit=sys.maxint):
"""Adds corpus data to the repository, both the source data taken from
the external source of the corpus and the processed files. Updates the
list of identifiers, the list of files and the list of processed files
in the index by appending elements to the end of those files. Only adds
patents that are not in the identifier list.
The input corpus is expected to be one of the corpora created by
step1_init.py and step2_process.py.
Unlike with creating corpora, where we can create and process various
corpora on different cores/machines, this method should never be run in
        parallel; always import one corpus at a time and wait for it to
finish."""
current_identifiers = self.read_identifiers()
corpus = CorpusInterface(language, datasource, corpus_path)
self._open_index_files()
logfile = os.path.join(self.log_dir, "add-corpus-%s.txt" % timestamp())
self.log = open(logfile, 'w')
log("Adding corpus %s" % corpus_path, self.log, notify=True)
c = 0
t1 = time.time()
added = 0
try:
for line in open(corpus.file_list):
c += 1
if c % 100 == 0: print c
if c > limit: c -= 1; break
added_p = self._add_patent(current_identifiers, corpus, c, line)
if added_p:
added += 1
        except:
            import traceback
            log("An Exception occurred - exiting...\n%s" % traceback.format_exc(),
                self.log, notify=True)
finally:
log("Added %d out of %d in %d seconds" % (added, c, time.time() - t1),
self.log, notify=True)
self._close_log()
self._close_index_files()
def _open_index_files(self):
for fname in self._index_files():
make_writable(fname)
self.fh_ids = open(self.idx_ids, 'a')
self.fh_files = open(self.idx_files, 'a')
self.fh_dates = open(self.idx_dates, 'a')
self.fh_processed = {}
for step in PROCESSING_STEPS:
fname = "%s%sidx-processed-%s.txt" % (self.idx_dir, os.sep, step)
self.fh_processed[step] = open(fname, 'a')
def _close_log(self):
logname = self.log.name
self.log.close()
read_only(logname)
def _close_index_files(self):
fhs = [self.fh_ids, self.fh_files, self.fh_dates] + self.fh_processed.values()
for fh in fhs:
fname = fh.name
fh.close()
read_only(fname)
def _add_patent(self, current_identifiers, corpus, c, line):
"""Add a document source and the processed files. Do nothing if source
cannot be found or if source is already in the list of current
        identifiers. Return True if data were added, False otherwise."""
# TODO: there is a nasty potential problem here. Suppose we are
# processing a file in a corpus and that file was in another corpus that
        # we imported before. And suppose that in the earlier corpus this file
# was not processed and it was processed in the later corpus. In that
# case the processed files are not copied. Even if this occurs, it will
# not impact the integrity of the repository.
        (external_source, local_source) = get_filelist_paths(line)
        source_path = validate_filename(external_source)
        if source_path is None:
            log("%05d WARNING, source not available for %s" % (c, external_source),
                self.log, notify=True)
            return False
        (id, path, basename) = parse_patent_path(source_path)
if id in current_identifiers:
log("%05d Skipping %s" % (c, external_source), self.log)
return False
else:
t = timestamp()
log("%05d Adding %s to %s" % (c, external_source, os.sep.join(path)),
self.log)
            self._add_patent_source(t, source_path, id, path, basename)
self._add_patent_processed(t, corpus, local_source, id, path, basename)
return True
def _add_patent_source(self, t, source, id, path, basename):
path = os.sep.join(path)
target_dir = os.path.join(self.data_dir, path)
target_file = os.path.join(target_dir, basename)
copy_and_compress(source, target_dir, target_file)
size = get_file_size(target_file)
self.fh_ids.write("%s\n" % id)
self.add_entry_to_file_index(t, id, size, path, basename)
def _add_patent_processed(self, t, corpus, local_source, id, path, basename):
for step in PROCESSING_STEPS:
fname = get_path_of_processed_file(corpus, step, local_source)
if fname is not None:
target_dir = os.path.join(self.proc_dir, step, os.sep.join(path))
target_file = os.path.join(target_dir, basename)
log(" Adding %s" % target_file, self.log)
copy_and_compress(fname, target_dir, target_file)
#self.add_entry_to_processed_index(t, id, step, corpus.git_commits)
self.add_entry_to_processed_index(t, id, step, corpus)
def add_entry_to_file_index(self, t, id, size, path, basename):
if basename.endswith('.gz'):
basename = basename[:-3]
self.fh_files.write("%s\t%s\t%d\t%s%s%s\n" %
(t, id, size, path, os.sep, basename))
def add_entry_to_processed_index(self, t, id, step, corpus):
commit = corpus.git_commits.get(step)
options = "\t".join(corpus.options[step])
if options: options = "\t" + options
self.fh_processed[step].write("%s\t%s\t%s%s\n" % (t, commit, id, options))
def filepath2longid(self, path):
return os.path.basename(path)
def filepath2shortid(self, path):
return self.longid2shortid(os.path.basename(path))
def longid2filepath(self, id, step='source'):
base_dir = self.data_dir
if not step == 'source':
if not step in PROCESSING_STEPS:
raise RepositoryError("illegal step - %s" % step)
base_dir = os.path.join(self.proc_dir, step)
path = os.path.join(
base_dir, os.sep.join(patentid2path(self.longid2shortid(id))), id)
return path if os.path.exists(path + '.gz') else None
def shortid2filepath(self, id, step='source'):
# use repo info to get full path
# use glob to get actual path
base_dir = self.data_dir
if not step == 'source':
if not step in PROCESSING_STEPS:
raise RepositoryError("illegal step - %s" % step)
base_dir = os.path.join(self.proc_dir, step)
path = os.path.join(
base_dir, os.sep.join(patentid2path(self.longid2shortid(id))), "*%s*.xml.gz" % id)
#return path
matches = glob.glob(path)
return matches[0] if len(matches) == 1 else None
def longid2shortid(self, id):
return get_patent_id(id)[2]
class RepositoryIndex(object):
"""An object like this will probably be needed at some point."""
def __init__(self, repository):
self.repository = repository
self.index = {}
c = 0
for line in open(self.repository.idx_files):
c += 1
if c > 10: break
(timestamp, id, size, path) = line.split()
    def get(self, identifier, step=None):
"""Return all information associated with an identifier."""
pass
class CorpusInterface(object):
"""Object that acts as an intermediary to a corpus. It emulates part of the
interface of corpus.Corpus (location and file_list instance variables and
adds information on commits in self.git_commits and processing step options
in self.options."""
def __init__(self, language, datasource, corpus_path):
self.corpus = Corpus(language, datasource, None, None, corpus_path,
DEFAULT_PIPELINE, False)
self.location = self.corpus.location
self.file_list = self.corpus.file_list
self._collect_git_commits()
self._collect_step_options()
def _collect_git_commits(self):
"""Collect the git commits for all steps in the processing chain."""
self.git_commits = {}
for step in PROCESSING_STEPS:
commit = self._find_git_commit_from_processing_history(step)
self.git_commits[step] = commit
def _find_git_commit_from_processing_history(self, step):
"""Returns the git commit for the code that processed this step of the
corpus. Returns the earliest commit as found in the corpus."""
fname = os.path.join(self.location, 'data', step, '01', 'state',
'processing-history.txt')
if os.path.exists(fname):
line = open(fname).readline()
return line.rstrip("\n\r\f").split("\t")[3]
return None
def _collect_step_options(self):
self.options = {}
for step in PROCESSING_STEPS:
fname = os.path.join(self.location, 'data', step, '01', 'config', 'pipeline-head.txt')
if not os.path.exists(fname):
options = []
else:
content = open(fname).readline().split()
options = content[1:]
self.options[step] = options
def timestamp():
return time.strftime("%Y%m%d:%H%M%S")
def get_filelist_paths(line):
""" a file list has two or three columns, the second is the full path to the
source and the third, if it exists, is the local path.By default the local
path is a full copy of the source path. Return the source path and resolved
local path."""
fields = line.rstrip("\n\r\f").split("\t")
source = fields[1]
local_source = fields[2] if len(fields)> 2 else source
return (source, local_source)
def get_path_of_processed_file(corpus, step, local_source):
"""Build a full file path given a corpus, a processing step and a local
source file"""
fname = os.path.join(corpus.location,
'data', step, '01', 'files', local_source)
return validate_filename(fname)
def validate_filename(fname):
"""Validate the filename by checking whether it exists, potentially in
gzipped form. Return the actual filename or return None if there was no
file. Note that fname typically never includes the .gz extension"""
if os.path.exists(fname + '.gz'): return fname + '.gz'
elif os.path.exists(fname): return fname
else: return None
def get_file_size(fname):
"""Returns the size of fname if it exists, or else the size of fname.gz."""
if not os.path.exists(fname):
fname += '.gz'
return os.path.getsize(fname)
def parse_patent_path(path):
    '''Take a source path (of which only the basename is relevant) and return
    the numerical identifier, the directory path used in the repository, and
    the basename.'''
basename = os.path.basename(path)
basename_bare = os.path.splitext(basename)[0]
prefix, kind, id = get_patent_id(basename_bare)
path = patentid2path(id)
return id, path, basename
def get_patent_id(basename):
"""Get the prefix, kind code and unique numerical identifier from the file
name. Prefix and kind code can both be empty strings."""
id = os.path.splitext(basename)[0]
if id.startswith('US'):
id = id[2:]
result = re_PATENT.match(id)
if result is None:
print "WARNING: no match on", basename
return None
prefix = result.groups()[0]
kind_code = result.groups()[-1]
number = ''.join([g for g in result.groups()[:-1] if g is not None])
return (prefix, kind_code, number)
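# Illustrative results (a sketch based on re_PATENT above; the filenames are
# hypothetical; note the prefix letter is folded into the returned number):
#   get_patent_id('US4236596A.xml')  -> (None, 'A', '4236596')
#   get_patent_id('USD654321S1.xml') -> ('D', 'S1', 'D654321')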
def patentid2path(id):
    '''Generate a pathname from the patent identifier. The identifier splits
    into three parts: the first is either a year, one or two letters (D, PP,
    T, etcetera) or two digits; the last is the final two digits of the
    identifier, which ensures that the deepest directories in the tree never
    hold more than 100 patents; the middle is whatever remains of the
    identifier. Only the first and middle parts are returned.'''
if id[:2] in ('19', '20'):
return id[:4], id[4:-2]
elif id[0].isalpha() and id[1].isalpha():
return id[:2], id[2:-2]
elif id[0].isalpha():
return id[:1], id[1:-2]
else:
return id[:2], id[2:-2]
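# Illustrative values (a sketch; the identifiers are hypothetical):
#   patentid2path('4236596')  -> ('42', '365')   # trailing '96' is dropped
#   patentid2path('20010001') -> ('2001', '00')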
def copy_and_compress(source, target_dir, target_file):
"""Copy source to target file, making sure there is a directory. Compress
the new file."""
ensure_path(target_dir)
shutil.copyfile(source, target_file)
compress(target_file)
def log(text, fh, notify=False):
fh.write("%s\n" % text)
if notify:
print text
def check_repository_existence(repository):
if not os.path.exists(repository):
exit("WARNING: repository '%s' does not exist" % repository)
def validate_location(path):
if os.path.isdir(os.path.join(REPOSITORY_DIR, path)):
return os.path.join(REPOSITORY_DIR, path)
if os.path.isdir(path):
return path
exit("WARNING: repository '%s' does not exist" % path)
### Some tests I ran before creating the repository code
LISTS_DIR = '/home/j/corpuswork/fuse/FUSEData/lists'
IDX_FILE = LISTS_DIR + '/ln_uspto.all.index.txt'
UPDATES_FILE = LISTS_DIR + '/ln-us-updates-2014-09-23-scrambled-basename.txt'
def test_filenames(fname):
"""Checks whether all the numbers we extract from the filenames are
unique. This is the case for IDX_FILE."""
number2name = {}
number2path = {}
fh = open(fname)
basedir = fh.readline()
c = 1
for line in fh:
c += 1
if c % 100000 == 0: print c
#if c > 100000: break
(number, adate, pdate, path) = line.rstrip("\n\f\r").split("\t")
name = os.path.splitext(os.path.basename(path))[0]
if number2name.has_key(number):
print "Warning, duplicate number", number, path
number2name.setdefault(number,[]).append(name)
number2path.setdefault(number,[]).append(path)
def test_filename_lengths(fname):
"""Checks lengths of file names."""
lengths = {}
fh = open(fname)
basedir = fh.readline()
c = 1
fhs = { 5: open('lengths-05.txt', 'w'),
6: open('lengths-06.txt', 'w'),
7: open('lengths-07.txt', 'w'),
8: open('lengths-08.txt', 'w'),
11: open('lengths-11.txt', 'w'),
12: open('lengths-12.txt', 'w') }
for line in fh:
c += 1
if c % 100000 == 0: print c
#if c > 100000: break
(number, adate, pdate, path) = line.rstrip("\n\f\r").split("\t")
name = os.path.splitext(os.path.basename(path))[0]
number_length = len(number)
lengths[number_length] = lengths.get(number_length,0) + 1
fhs[number_length].write("%s\n" % name)
print lengths
def test_directory_structure():
"""Some experiments to see how to do the directory structure of the
repository. The goal is to have the filenumber reflected in the path in a
predicatble manner. It looks like having a three-deep structure works
nicely. The deepest level just encodes the last two numbers of the number
(so no more than 100 documents in the leaf directories). Then the first is
either a year, one or two letters, or two numbers. The middle directory is
whatever remains of the filename."""
fh = open('tmp-patnums.txt')
fh.readline()
dirs = {}
c = 0
for line in fh:
c += 1
if c % 100000 == 0: print c
num = line.strip()
        (dir1, dir2) = patentid2path(num)
dirs.setdefault(dir1,{})
dirs[dir1][dir2] = dirs[dir1].get(dir2,0) + 1
        if not (dir1 and dir2):
            print num, dir1, dir2
for dir1 in sorted(dirs):
print '>', dir1, len(dirs[dir1])
for dir2 in sorted(dirs[dir1]):
print ' ', dir2, dirs[dir1][dir2]
def test_compare_lists(list1, list2):
"""list1 is an index with four columns, list2 just has the one column."""
in1 = open(list1)
in2 = open(list2)
out1 = open("out-in-repo.txt", 'w')
out2 = open("out-not-in-repo.txt", 'w')
basedir = in1.readline()
repo = {}
c = 1
for line in in1:
c += 1
if c % 100000 == 0: print c
repo[line.split("\t")[0]] = True
in_repo = 0
not_in_repo = 0
c = 0
for line in in2:
c += 1
if c % 100000 == 0: print c
basename = line.strip()
(prefix, code, id) = get_patent_id(basename)
if id in repo:
in_repo += 1
out1.write("%s\n" % basename)
else:
not_in_repo += 1
out2.write("%s\n" % basename)
print 'in_repo', in_repo
print 'not_in_repo', not_in_repo
if __name__ == '__main__':
options = ['initialize', 'add-corpus', 'analyze', 'repository=', 'type=', 'corpus=']
(opts, args) = getopt.getopt(sys.argv[1:], 'r:t:c:', options)
init_p = False
add_corpus_p = False
analyze_p = False
repository = None
repotype = 'patents'
corpus = None
for opt, val in opts:
if opt == '--initialize': init_p = True
if opt == '--add-corpus': add_corpus_p = True
if opt == '--analyze': analyze_p = True
if opt in ('-r', '--repository'): repository = val
if opt in ('-t', '--type'): repotype = val
if opt in ('-c', '--corpus'): corpus = val
if repository is None:
exit("WARNING: missing repository argument")
if init_p:
print "Initializing repository '%s'" % repository
create_repository(repository, repotype)
elif add_corpus_p:
if corpus is None:
exit("WARNING: missing corpus argument")
open_repository(repository).add_corpus(corpus)
elif analyze_p:
print "Analyzing repository '%s'" % repository
open_repository(repository).analyze()
| [
"[email protected]"
] | |
ad14696ccc3a66d98529f577771fe50ff92df097 | 8038c72e9c05b9a962a78002e1ac32ac5797d33f | /qfcommon/base/dbpool.py | 079fa5d9342b7d212f06955af235fb6c82168436 | [] | no_license | liusiquan/open_test | faaf419dcd5877558e25847edf5e128c5e9e64b2 | 0cb0aaf401234ac3274d34dcf08ff633917db048 | refs/heads/master | 2021-01-11T20:44:25.567470 | 2017-01-18T10:13:54 | 2017-01-18T10:13:54 | 79,174,997 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 43,385 | py | # coding: utf-8
import time, datetime, os
import types, random
import threading
import logging
import traceback
import pager
from contextlib import contextmanager
log = logging.getLogger()
dbpool = None
def timeit(func):
def _(*args, **kwargs):
starttm = time.time()
ret = 0
num = 0
err = ''
try:
retval = func(*args, **kwargs)
t = type(retval)
if t == types.ListType:
num = len(retval)
elif t == types.DictType:
num = 1
return retval
except Exception, e:
err = str(e)
ret = -1
raise
finally:
endtm = time.time()
conn = args[0]
#dbcf = conn.pool.dbcf
dbcf = conn.param
log.info('server=%s|name=%s|user=%s|addr=%s:%d|db=%s|idle=%d|busy=%d|max=%d|time=%d|ret=%s|num=%d|sql=%s|err=%s',
conn.type, conn.name, dbcf.get('user',''),
dbcf.get('host',''), dbcf.get('port',0),
os.path.basename(dbcf.get('db','')),
len(conn.pool.dbconn_idle),
len(conn.pool.dbconn_using),
conn.pool.max_conn,
int((endtm-starttm)*1000000),
str(ret), num, repr(args[1]), err)
return _
class DBPoolBase:
def acquire(self, name):
pass
def release(self, name, conn):
pass
class DBResult:
def __init__(self, fields, data):
self.fields = fields
self.data = data
def todict(self):
ret = []
for item in self.data:
ret.append(dict(zip(self.fields, item)))
return ret
def __iter__(self):
for row in self.data:
yield dict(zip(self.fields, row))
def row(self, i, isdict=True):
if isdict:
return dict(zip(self.fields, self.data[i]))
return self.data[i]
def __getitem__(self, i):
return dict(zip(self.fields, self.data[i]))
class DBFunc:
def __init__(self, data):
self.value = data
class DBConnection:
def __init__(self, param, lasttime, status):
self.name = None
self.param = param
self.conn = None
self.status = status
self.lasttime = lasttime
self.pool = None
self.server_id = None
def __str__(self):
return '<%s %s:%d %s@%s>' % (self.type,
self.param.get('host',''), self.param.get('port',0),
self.param.get('user',''), self.param.get('db',0)
)
def is_available(self):
return self.status == 0
def useit(self):
self.status = 1
self.lasttime = time.time()
def releaseit(self):
self.status = 0
def connect(self):
pass
def close(self):
pass
def alive(self):
pass
def cursor(self):
return self.conn.cursor()
@timeit
def execute(self, sql, param=None):
#log.info('exec:%s', sql)
cur = self.conn.cursor()
if param:
if not isinstance(param, (types.DictType, types.TupleType)):
param = tuple([param])
ret = cur.execute(sql, param)
else:
ret = cur.execute(sql)
cur.close()
return ret
@timeit
def executemany(self, sql, param):
cur = self.conn.cursor()
if param:
if not isinstance(param, (types.DictType, types.TupleType)):
param = tuple([param])
ret = cur.executemany(sql, param)
else:
ret = cur.executemany(sql)
cur.close()
return ret
@timeit
def query(self, sql, param=None, isdict=True, head=False):
        '''Run an SQL query and return the full result set.'''
#log.info('query:%s', sql)
cur = self.conn.cursor()
if not param:
cur.execute(sql)
else:
if not isinstance(param, (types.DictType, types.TupleType)):
param = tuple([param])
cur.execute(sql, param)
res = cur.fetchall()
cur.close()
res = [self.format_timestamp(r, cur) for r in res]
#log.info('desc:', cur.description)
if res and isdict:
ret = []
xkeys = [ i[0] for i in cur.description]
for item in res:
ret.append(dict(zip(xkeys, item)))
else:
ret = res
if head:
xkeys = [ i[0] for i in cur.description]
ret.insert(0, xkeys)
return ret
@timeit
def get(self, sql, param=None, isdict=True):
        '''Run an SQL query and return a single row.'''
cur = self.conn.cursor()
if not param:
cur.execute(sql)
else:
if not isinstance(param, (types.DictType, types.TupleType)):
param = tuple([param])
cur.execute(sql, param)
res = cur.fetchone()
cur.close()
res = self.format_timestamp(res, cur)
if res and isdict:
xkeys = [ i[0] for i in cur.description]
return dict(zip(xkeys, res))
else:
return res
def value2sql(self, v, charset='utf-8'):
tv = type(v)
if tv in [types.StringType, types.UnicodeType]:
if tv == types.UnicodeType:
v = v.encode(charset)
if v.startswith(('now()','md5(')):
return v
return "'%s'" % self.escape(v)
elif isinstance(v, datetime.datetime) or isinstance(v, datetime.date):
return "'%s'" % str(v)
elif isinstance(v, DBFunc):
return v.value
else:
if v is None:
return 'NULL'
return str(v)
def exp2sql(self, key, op, value):
item = '(`%s` %s ' % (key.strip('`').replace('.','`.`'), op)
if op == 'in':
item += '(%s))' % ','.join([self.value2sql(x) for x in value])
elif op == 'not in':
item += '(%s))' % ','.join([self.value2sql(x) for x in value])
elif op == 'between':
item += ' %s and %s)' % (self.value2sql(value[0]), self.value2sql(value[1]))
else:
item += self.value2sql(value) + ')'
return item
def dict2sql(self, d, sp=','):
        '''Entries may be {name: value} pairs or {name: (operator, value)} tuples.'''
x = []
for k,v in d.iteritems():
if isinstance(v, types.TupleType):
x.append('%s' % self.exp2sql(k, v[0], v[1]))
else:
x.append('`%s`=%s' % (k.strip(' `').replace('.','`.`'), self.value2sql(v)))
return sp.join(x)
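    # Illustrative output (a sketch; key order follows dict iteration):
    #   dict2sql({'status': 0})             -> "`status`=0"
    #   dict2sql({'id': ('in', [1, 2])})    -> "(`id` in (1,2))"
    #   dict2sql({'a': 1, 'b': 2}, ' and ') -> "`a`=1 and `b`=2"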
def dict2on(self, d, sp=' and '):
x = []
for k,v in d.iteritems():
x.append('`%s`=`%s`' % (k.strip(' `').replace('.','`.`'), v.strip(' `').replace('.','`.`')))
return sp.join(x)
def dict2insert(self, d):
keys = d.keys()
vals = []
for k in keys:
vals.append('%s' % self.value2sql(d[k]))
new_keys = ['`' + k.strip('`') + '`' for k in keys]
return ','.join(new_keys), ','.join(vals)
def fields2where(self, fields, where=None):
if not where:
where = {}
for f in fields:
if f.value == None or (f.value == '' and f.isnull == False):
continue
where[f.name] = (f.op, f.value)
return where
def format_table(self, table):
        '''Normalize a table name: wrap it in backticks, supporting "as" aliases.'''
        # if the name carries an alias, only quote the part before the space
        table = table.strip(' `').replace(',', '`,`')
        index = table.find(' ')
        if index >= 0:
            return '`%s`%s' % (table[:index], table[index:])
        else:
            return '`%s`' % table
def insert(self, table, values, other=None):
#sql = "insert into %s set %s" % (table, self.dict2sql(values))
keys, vals = self.dict2insert(values)
sql = "insert into %s(%s) values (%s)" % (self.format_table(table), keys, vals)
if other:
sql += ' ' + other
return self.execute(sql)
def insert_list(self, table, values_list, other=None):
sql = 'insert into %s ' % self.format_table(table)
sql_key = ''
sql_value = []
for values in values_list:
keys, vals = self.dict2insert(values)
            sql_key = keys  # the key set is identical for every row
sql_value.append('(%s)' % vals)
sql += ' (' + sql_key + ') ' + 'values' + ','.join(sql_value)
if other:
sql += ' ' + other
return self.execute(sql)
def update(self, table, values, where=None, other=None):
sql = "update %s set %s" % (self.format_table(table), self.dict2sql(values))
if where:
sql += " where %s" % self.dict2sql(where,' and ')
if other:
sql += ' ' + other
return self.execute(sql)
def delete(self, table, where, other=None):
sql = "delete from %s" % self.format_table(table)
if where:
sql += " where %s" % self.dict2sql(where, ' and ')
if other:
sql += ' ' + other
return self.execute(sql)
def select(self, table, where=None, fields='*', other=None, isdict=True):
sql = self.select_sql(table, where, fields, other)
return self.query(sql, None, isdict=isdict)
def select_one(self, table, where=None, fields='*', other=None, isdict=True):
if not other:
other = ' limit 1'
if 'limit' not in other:
other += ' limit 1'
sql = self.select_sql(table, where, fields, other)
return self.get(sql, None, isdict=isdict)
def select_join(self, table1, table2, join_type='inner', on=None, where=None, fields='*', other=None, isdict=True):
sql = self.select_join_sql(table1, table2, join_type, on, where, fields, other)
return self.query(sql, None, isdict=isdict)
def select_join_one(self, table1, table2, join_type='inner', on=None, where=None, fields='*', other=None, isdict=True):
if not other:
other = ' limit 1'
if 'limit' not in other:
other += ' limit 1'
sql = self.select_join_sql(table1, table2, join_type, on, where, fields, other)
return self.get(sql, None, isdict=isdict)
def select_sql(self, table, where=None, fields='*', other=None):
if type(fields) in (types.ListType, types.TupleType):
fields = ','.join(fields)
sql = "select %s from %s" % (fields, self.format_table(table))
if where:
sql += " where %s" % self.dict2sql(where, ' and ')
if other:
sql += ' ' + other
return sql
def select_join_sql(self, table1, table2, join_type='inner', on=None, where=None, fields='*', other=None):
if type(fields) in (types.ListType, types.TupleType):
fields = ','.join(fields)
sql = "select %s from %s %s join %s" % (fields, self.format_table(table1), join_type, self.format_table(table2))
if on:
sql += " on %s" % self.dict2on(on, ' and ')
if where:
sql += " where %s" % self.dict2sql(where, ' and ')
if other:
sql += ' ' + other
return sql
def select_page(self, sql, pagecur=1, pagesize=20, count_sql=None, maxid=-1):
return pager.db_pager(self, sql, pagecur, pagesize, count_sql, maxid)
def last_insert_id(self):
pass
def start(self): # start transaction
pass
def commit(self):
self.conn.commit()
def rollback(self):
self.conn.rollback()
def escape(self, s):
return s
def format_timestamp(self, ret, cur):
        '''Convert integer columns whose names end in _time to datetime.'''
if not ret:
return ret
index = []
for d in cur.description:
if d[0].endswith('_time'):
index.append(cur.description.index(d))
res = []
for i , t in enumerate(ret):
if i in index and type(t) in [types.IntType,types.LongType]:
res.append(datetime.datetime.fromtimestamp(t))
else:
res.append(t)
return res
def with_mysql_reconnect(func):
def close_mysql_conn(self):
try:
self.conn.close()
except:
log.warning(traceback.format_exc())
self.conn = None
def _(self, *args, **argitems):
if self.type == 'mysql':
import MySQLdb as m
elif self.type == 'pymysql':
import pymysql as m
trycount = 3
while True:
try:
x = func(self, *args, **argitems)
except m.OperationalError, e:
log.warning(traceback.format_exc())
                if e[0] >= 2000:  # client-side error
close_mysql_conn(self)
self.connect()
trycount -= 1
if trycount > 0:
continue
raise
except m.InterfaceError, e:
log.warning(traceback.format_exc())
close_mysql_conn(self)
self.connect()
trycount -= 1
if trycount > 0:
continue
raise
else:
return x
return _
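# Retry behaviour: up to three attempts on client-side OperationalError
# (error code >= 2000) or on InterfaceError, closing and reopening the
# connection before each retry; server-side errors propagate immediately.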
#def with_mysql_reconnect(func):
# def _(self, *args, **argitems):
# import MySQLdb
# trycount = 3
# while True:
# try:
# x = func(self, *args, **argitems)
# except MySQLdb.OperationalError, e:
# #log.err('mysql error:', e)
# if e[0] >= 2000: # client error
# #log.err('reconnect ...')
# self.conn.close()
# self.connect()
#
# trycount -= 1
# if trycount > 0:
# continue
# raise
# else:
# return x
#
# return _
class MySQLConnection (DBConnection):
type = "mysql"
def __init__(self, param, lasttime, status):
DBConnection.__init__(self, param, lasttime, status)
self.connect()
def useit(self):
self.status = 1
self.lasttime = time.time()
def releaseit(self):
self.status = 0
def connect(self):
engine = self.param['engine']
if engine == 'mysql':
import MySQLdb
self.conn = MySQLdb.connect(host = self.param['host'],
port = self.param['port'],
user = self.param['user'],
passwd = self.param['passwd'],
db = self.param['db'],
charset = self.param['charset'],
connect_timeout = self.param.get('timeout', 10),
)
self.conn.autocommit(1)
cur = self.conn.cursor()
cur.execute("show variables like 'server_id'")
row = cur.fetchone()
self.server_id = int(row[1])
#if self.param.get('autocommit',None):
# log.note('set autocommit')
# self.conn.autocommit(1)
#initsqls = self.param.get('init_command')
#if initsqls:
# log.note('init sqls:', initsqls)
# cur = self.conn.cursor()
# cur.execute(initsqls)
# cur.close()
else:
raise ValueError, 'engine error:' + engine
#log.note('mysql connected', self.conn)
def close(self):
self.conn.close()
self.conn = None
@with_mysql_reconnect
def alive(self):
if self.is_available():
cur = self.conn.cursor()
cur.execute("show tables;")
cur.close()
self.conn.ping()
@with_mysql_reconnect
def execute(self, sql, param=None):
return DBConnection.execute(self, sql, param)
@with_mysql_reconnect
def executemany(self, sql, param):
return DBConnection.executemany(self, sql, param)
@with_mysql_reconnect
def query(self, sql, param=None, isdict=True, head=False):
return DBConnection.query(self, sql, param, isdict, head)
@with_mysql_reconnect
def get(self, sql, param=None, isdict=True):
return DBConnection.get(self, sql, param, isdict)
def escape(self, s, enc='utf-8'):
if type(s) == types.UnicodeType:
s = s.encode(enc)
ns = self.conn.escape_string(s)
return unicode(ns, enc)
def last_insert_id(self):
ret = self.query('select last_insert_id()', isdict=False)
return ret[0][0]
def start(self):
sql = "start transaction"
return self.execute(sql)
def commit(self):
sql = 'commit'
return self.execute(sql)
def rollback(self):
sql = 'rollback'
return self.execute(sql)
class PyMySQLConnection (MySQLConnection):
type = "pymysql"
def __init__(self, param, lasttime, status):
MySQLConnection.__init__(self, param, lasttime, status)
def connect(self):
engine = self.param['engine']
if engine == 'pymysql':
import pymysql
self.conn = pymysql.connect(host = self.param['host'],
port = self.param['port'],
user = self.param['user'],
passwd = self.param['passwd'],
db = self.param['db'],
charset = self.param['charset'],
connect_timeout = self.param.get('timeout', 10),
)
self.conn.autocommit(1)
cur = self.conn.cursor()
cur.execute("show variables like 'server_id'")
row = cur.fetchone()
self.server_id = int(row[1])
else:
raise ValueError, 'engine error:' + engine
class SQLiteConnection (DBConnection):
type = "sqlite"
def __init__(self, param, lasttime, status):
DBConnection.__init__(self, param, lasttime, status)
def connect(self):
engine = self.param['engine']
if engine == 'sqlite':
import sqlite3
self.conn = sqlite3.connect(self.param['db'], detect_types=sqlite3.PARSE_DECLTYPES, isolation_level=None)
else:
raise ValueError, 'engine error:' + engine
def useit(self):
DBConnection.useit(self)
if not self.conn:
self.connect()
def releaseit(self):
DBConnection.releaseit(self)
self.conn.close()
self.conn = None
def escape(self, s, enc='utf-8'):
s = s.replace("'", "\'")
s = s.replace('"', '\"')
return s
def last_insert_id(self):
ret = self.query('select last_insert_rowid()', isdict=False)
return ret[0][0]
def start(self):
sql = "BEGIN"
return self.conn.execute(sql)
class DBPool (DBPoolBase):
def __init__(self, dbcf):
        # one item: [conn, last_get_time, status]
self.dbconn_idle = []
self.dbconn_using = []
self.dbcf = dbcf
self.max_conn = 20
self.min_conn = 1
if self.dbcf.has_key('conn'):
self.max_conn = self.dbcf['conn']
self.connection_class = {}
x = globals()
for v in x.itervalues():
if type(v) == types.ClassType and v != DBConnection and issubclass(v, DBConnection):
self.connection_class[v.type] = v
self.lock = threading.Lock()
self.cond = threading.Condition(self.lock)
self.open(self.min_conn)
def synchronize(func):
def _(self, *args, **argitems):
self.lock.acquire()
x = None
try:
x = func(self, *args, **argitems)
finally:
self.lock.release()
return x
return _
def open(self, n=1):
param = self.dbcf
newconns = []
for i in range(0, n):
myconn = self.connection_class[param['engine']](param, time.time(), 0)
myconn.pool = self
newconns.append(myconn)
self.dbconn_idle += newconns
def clear_timeout(self):
#log.info('try clear timeout conn ...')
now = time.time()
dels = []
allconn = len(self.dbconn_idle) + len(self.dbconn_using)
for c in self.dbconn_idle:
if allconn == 1:
break
if now - c.lasttime > self.dbcf.get('idle_timeout', 10):
dels.append(c)
allconn -= 1
if dels:
log.debug('close timeout db conn:%d', len(dels))
for c in dels:
c.close()
self.dbconn_idle.remove(c)
@synchronize
def acquire(self, timeout=10):
start = time.time()
while len(self.dbconn_idle) == 0:
if len(self.dbconn_idle) + len(self.dbconn_using) < self.max_conn:
self.open()
continue
self.cond.wait(timeout)
if int(time.time() - start) > timeout:
log.error('func=acquire|error=no idle connections')
raise RuntimeError('no idle connections')
conn = self.dbconn_idle.pop(0)
conn.useit()
self.dbconn_using.append(conn)
if random.randint(0, 100) > 80:
self.clear_timeout()
return conn
@synchronize
def release(self, conn):
        # conn must be a valid connection here
        # FIXME: can conn ever be falsy? If so it would never leave dbconn_using
if conn:
self.dbconn_using.remove(conn)
conn.releaseit()
if conn.conn:
self.dbconn_idle.insert(0, conn)
self.cond.notify()
@synchronize
def alive(self):
for conn in self.dbconn_idle:
conn.alive()
def size(self):
return len(self.dbconn_idle), len(self.dbconn_using)
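# Lazy proxy over a RWDBPool: methods listed in _modify_methods acquire the
# master connection on first use; everything else is routed to a slave replica.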
class DBConnProxy:
#def __init__(self, masterconn, slaveconn):
def __init__(self, pool, timeout=10):
#self.name = ''
#self.master = masterconn
#self.slave = slaveconn
self._pool = pool
self._master = None
self._slave = None
self._timeout = timeout
self._modify_methods = set(['execute', 'executemany', 'last_insert_id',
'insert', 'update', 'delete', 'insert_list', 'start', 'rollback', 'commit'])
def __getattr__(self, name):
#if name.startswith('_') and name[1] != '_':
# return self.__dict__[name]
if name in self._modify_methods:
if not self._master:
self._master = self._pool.master.acquire(self._timeout)
return getattr(self._master, name)
else:
if name == 'master':
if not self._master:
self._master = self._pool.master.acquire(self._timeout)
return self._master
if name == 'slave':
if not self._slave:
self._slave = self._pool.get_slave().acquire(self._timeout)
return self._slave
if not self._slave:
self._slave = self._pool.get_slave().acquire(self._timeout)
return getattr(self._slave, name)
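# Read/write-splitting pool: one master DBPool for writes plus N slave pools
# for reads, chosen by the configured policy (only round_robin is supported).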
class RWDBPool:
def __init__(self, dbcf):
self.dbcf = dbcf
self.name = ''
self.policy = dbcf.get('policy', 'round_robin')
self.master = DBPool(dbcf.get('master', None))
self.slaves = []
self._slave_current = -1
for x in dbcf.get('slave', []):
self.slaves.append(DBPool(x))
def get_slave(self):
if self.policy == 'round_robin':
size = len(self.slaves)
self._slave_current = (self._slave_current + 1) % size
return self.slaves[self._slave_current]
else:
            raise ValueError, 'policy not supported'
def get_master(self):
return self.master
# def acquire(self, timeout=10):
# #log.debug('rwdbpool acquire')
# master_conn = None
# slave_conn = None
#
# try:
# master_conn = self.master.acquire(timeout)
# slave_conn = self.get_slave().acquire(timeout)
# return DBConnProxy(master_conn, slave_conn)
# except:
# if master_conn:
# master_conn.pool.release(master_conn)
# if slave_conn:
# slave_conn.pool.release(slave_conn)
# raise
def acquire(self, timeout=10):
return DBConnProxy(self, timeout)
def release(self, conn):
#log.debug('rwdbpool release')
if conn._master:
#log.debug('release master')
conn._master.pool.release(conn._master)
if conn._slave:
#log.debug('release slave')
conn._slave.pool.release(conn._slave)
def size(self):
ret = {'master': (-1,-1), 'slave':[]}
if self.master:
ret['master'] = self.master.size()
for x in self.slaves:
key = '%s@%s:%d' % (x.dbcf['user'], x.dbcf['host'], x.dbcf['port'])
ret['slave'].append((key, x.size()))
return ret
def checkalive(name=None):
global dbpool
while True:
if name is None:
checknames = dbpool.keys()
else:
checknames = [name]
for k in checknames:
pool = dbpool[k]
pool.alive()
time.sleep(300)
def install(cf):
global dbpool
dbpool = {}
for name,item in cf.iteritems():
#item = cf[name]
dbp = None
if item.has_key('master'):
dbp = RWDBPool(item)
else:
dbp = DBPool(item)
dbpool[name] = dbp
return dbpool
def acquire(name, timeout=10):
global dbpool
#log.info("acquire:", name)
pool = dbpool[name]
x = pool.acquire(timeout)
x.name = name
return x
def release(conn):
if not conn:
return
global dbpool
#log.info("release:", name)
pool = dbpool[conn.name]
return pool.release(conn)
def execute(db, sql, param=None):
return db.execute(sql, param)
def executemany(db, sql, param):
return db.executemany(sql, param)
def query(db, sql, param=None, isdict=True, head=False):
return db.query(sql, param, isdict, head)
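# Context-manager helpers; typical usage:
#   with get_connection('name') as conn:
#       ret = conn.query(sql)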
@contextmanager
def get_connection(token):
    # initialize first so release() below is safe even if acquire() raises
    conn = None
    try:
        conn = acquire(token)
        yield conn
    except:
        log.error("error=%s", traceback.format_exc())
    finally:
        release(conn)
@contextmanager
def get_connection_exception(token):
    '''On an exception, release the connection and re-raise.'''
    conn = None
    try:
        conn = acquire(token)
        yield conn
    except:
        #log.error("error=%s", traceback.format_exc())
        raise
    finally:
        release(conn)
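# decorator: acquires the named connection(s) and exposes them as self.db (or
# as `self` for bare functions), guaranteeing release when the call returns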
def with_database(name, errfunc=None, errstr=''):
def f(func):
def _(self, *args, **argitems):
multi_db = isinstance(name, (types.TupleType, types.ListType))
is_none, is_inst = False, False
if isinstance(self, types.NoneType):
is_none = True
elif isinstance(self, types.ObjectType):
is_inst = True
if multi_db:
dbs = {}
for dbname in name:
dbs[dbname] = acquire(dbname)
if is_inst:
self.db = dbs
elif is_none:
self = dbs
else:
if is_inst:
self.db = acquire(name)
elif is_none:
self = acquire(name)
x = None
try:
x = func(self, *args, **argitems)
except:
if errfunc:
return getattr(self, errfunc)(error=errstr)
else:
raise
finally:
if multi_db:
if is_inst:
dbs = self.db
else:
dbs = self
dbnames = dbs.keys()
for dbname in dbnames:
release(dbs.pop(dbname))
else:
if is_inst:
release(self.db)
elif is_none:
release(self)
if is_inst:
self.db = None
elif is_none:
self = None
return x
return _
return f
def test():
import random, logger
logger.install('stdout')
#log.install("SimpleLogger")
dbcf = {'test1': {'engine': 'sqlite', 'db':'test1.db', 'conn':1}}
#dbcf = {'test1': {'engine': 'sqlite', 'db':':memory:', 'conn':1}}
install(dbcf)
sql = "create table if not exists user(id integer primary key, name varchar(32), ctime timestamp)"
print 'acquire'
x = acquire('test1')
print 'acquire ok'
x.execute(sql)
#sql1 = "insert into user values (%d, 'zhaowei', datetime())" % (random.randint(1, 100));
sql1 = "insert into user values (%d, 'zhaowei', datetime())" % (random.randint(1, 100));
x.execute(sql1)
x.insert("user", {"name":"bobo","ctime":DBFunc("datetime()")})
sql2 = "select * from user"
ret = x.query(sql2)
print 'result:', ret
ret = x.query('select * from user where name=?', 'bobo')
print 'result:', ret
print 'release'
release(x)
print 'release ok'
print '-' * 60
class Test2:
@with_database("test1")
def test2(self):
ret = self.db.query("select * from user")
print ret
t = Test2()
t.test2()
def test1():
DATABASE = {'test': # connection name, used for getting connection from pool
{'engine':'mysql', # db type, eg: mysql, sqlite
'db':'test', # db table
'host':'127.0.0.1', # db host
'port':3306, # db port
'user':'root', # db user
'passwd':'654321',# db password
'charset':'utf8',# db charset
'conn':20} # db connections in pool
}
install(DATABASE)
while True:
x = random.randint(0, 10)
print 'x:', x
conns = []
for i in range(0, x):
c = acquire('test')
time.sleep(1)
conns.append(c)
print dbpool['test'].size()
for c in conns:
release(c)
time.sleep(1)
print dbpool['test'].size()
time.sleep(1)
print dbpool['test'].size()
def test2():
DATABASE = {'test': # connection name, used for getting connection from pool
{'engine':'mysql', # db type, eg: mysql, sqlite
'db':'test', # db name
'host':'127.0.0.1', # db host
'port':3306, # db port
'user':'root', # db user
'passwd':'123456', # db password
'charset':'utf8', # db charset
'conn':110} # db connections in pool
}
install(DATABASE)
def go():
#x = random.randint(0, 10)
#print 'x:', x
#conns = []
#for i in range(0, x):
# c = acquire('test')
# #time.sleep(1)
# conns.append(c)
# print dbpool['test'].size()
#for c in conns:
# release(c)
# #time.sleep(1)
# print dbpool['test'].size()
while True:
#time.sleep(1)
c = acquire('test')
#print dbpool['test'].size()
release(c)
#print dbpool['test'].size()
ths = []
for i in range(0, 100):
t = threading.Thread(target=go, args=())
ths.append(t)
for t in ths:
t.start()
for t in ths:
t.join()
def test3():
import logger
logger.install('stdout')
global log
log = logger.log
DATABASE = {'test':{
'policy': 'round_robin',
'default_conn':'auto',
'master':
{'engine':'mysql',
'db':'test',
'host':'127.0.0.1',
'port':3306,
'user':'root',
'passwd':'123456',
'charset':'utf8',
'idle_timeout':60,
'conn':20},
'slave':[
{'engine':'mysql',
'db':'test',
'host':'127.0.0.1',
'port':3306,
'user':'zhaowei_r1',
'passwd':'123456',
'charset':'utf8',
'conn':20},
{'engine':'mysql',
'db':'test',
'host':'127.0.0.1',
'port':3306,
'user':'zhaowei_r2',
'passwd':'123456',
'charset':'utf8',
'conn':20},
],
},
}
install(DATABASE)
while True:
x = random.randint(0, 10)
print 'x:', x
conns = []
print 'acquire ...'
for i in range(0, x):
c = acquire('test')
time.sleep(1)
c.insert('ztest', {'name':'zhaowei%d'%(i)})
print c.query('select count(*) from ztest')
print c.get('select count(*) from ztest')
conns.append(c)
print dbpool['test'].size()
print 'release ...'
for c in conns:
release(c)
time.sleep(1)
print dbpool['test'].size()
time.sleep(1)
print '-'*60
print dbpool['test'].size()
print '-'*60
time.sleep(1)
def test4(tcount):
DATABASE = {'test': # connection name, used for getting connection from pool
{'engine':'mysql', # db type, eg: mysql, sqlite
'db':'qf_core', # db name
'host':'172.100.101.106', # db host
'port':3306, # db port
'user':'qf', # db user
'passwd':'123456', # db password
'charset':'utf8', # db charset
'conn':10} # db connections in pool
}
install(DATABASE)
def run_thread():
while True:
time.sleep(0.01)
conn = None
try:
conn = acquire('test')
except:
log.debug("%s catch exception in acquire", threading.currentThread().name)
traceback.print_exc()
time.sleep(0.5)
continue
try:
sql = "select count(*) from profile"
ret = conn.query(sql)
except:
log.debug("%s catch exception in query", threading.currentThread().name)
traceback.print_exc()
finally:
if conn:
release(conn)
conn = None
import threading
th = []
for i in range(0, tcount):
_th = threading.Thread(target=run_thread, args=())
log.debug("%s create", _th.name)
th.append(_th)
for t in th:
t.start()
log.debug("%s start", t.name)
for t in th:
t.join()
log.debug("%s finish",t.name)
def test5():
DATABASE = {'test': # connection name, used for getting connection from pool
{'engine':'mysql', # db type, eg: mysql, sqlite
'db':'qf_core', # db name
'host':'172.100.101.106', # db host
'port':3306, # db port
'user':'qf', # db user
'passwd':'123456', # db password
'charset':'utf8', # db charset
'conn':20} # db connections in pool
}
install(DATABASE)
def run_thread():
i = 0
while i < 10:
time.sleep(0.01)
with get_connection('test') as conn:
sql = "select count(*) from profile"
ret = conn.query(sql)
log.debug('ret:%s', ret)
i += 1
pool = dbpool['test']
log.debug("pool size: %s", pool.size())
import threading
th = []
for i in range(0, 10):
_th = threading.Thread(target=run_thread, args=())
log.debug("%s create", _th.name)
th.append(_th)
for t in th:
t.setDaemon(True)
t.start()
log.debug("%s start", t.name)
def test_with():
DATABASE = {'test': # connection name, used for getting connection from pool
{'engine':'mysql', # db type, eg: mysql, sqlite
'db':'qf_core', # db name
'host':'172.100.101.106', # db host
'port':3306, # db port
'user':'qf', # db user
'passwd':'123456', # db password
'charset':'utf8', # db charset
'conn':10} # db connections in pool
}
install(DATABASE)
with get_connection('test') as conn:
record = conn.query("select retcd from profile where userid=227519")
print record
#record = conn.query("select * from chnlbind where userid=227519")
#print record
pool = dbpool['test']
print pool.size()
with get_connection('test') as conn:
record = conn.query("select * from profile where userid=227519")
print record
record = conn.query("select * from chnlbind where userid=227519")
print record
pool = dbpool['test']
print pool.size()
def test_format_time():
import logger
logger.install('stdout')
DATABASE = {'test': # connection name, used for getting connection from pool
{'engine':'mysql', # db type, eg: mysql, sqlite
'db':'qiantai', # db name
'host':'172.100.101.151', # db host
'port':3306, # db port
'user':'qf', # db user
'passwd':'123456', # db password
'charset':'utf8', # db charset
'conn':10} # db connections in pool
}
    install(DATABASE)
with get_connection('test') as conn:
print conn.select('order')
print conn.select_join('app','customer','inner',)
print conn.format_table('order as o')
def test_base_func():
import logger
logger.install('stdout')
database = {'test': # connection name, used for getting connection from pool
{'engine':'mysql', # db type, eg: mysql, sqlite
'db':'qf_core', # db name
'host':'172.100.101.151', # db host
'port':3306, # db port
'user':'qf', # db user
'passwd':'123456', # db password
'charset':'utf8', # db charset
'conn':10} # db connections in pool
}
install(database)
with get_connection('test') as conn:
conn.insert('auth_user',{
'username':'13512345677',
'password':'123',
'mobile':'13512345677',
'email':'[email protected]',
})
print conn.select('auth_user',{
'username':'13512345677',
})
conn.delete('auth_user',{
'username':'13512345677',
})
conn.select_join('profile as p','auth_user as a',where={
'p.userid':DBFunc('a.id'),
})
def test_new_rw():
import logger
logger.install('stdout')
database = {'test':{
'policy': 'round_robin',
'default_conn':'auto',
'master':
{'engine':'mysql',
'db':'test',
'host':'127.0.0.1',
'port':3306,
'user':'root',
'passwd':'654321',
'charset':'utf8',
'conn':10}
,
'slave':[
{'engine':'mysql',
'db':'test',
'host':'127.0.0.1',
'port':3306,
'user':'root',
'passwd':'654321',
'charset':'utf8',
'conn':10
},
{'engine':'mysql',
'db':'test',
'host':'127.0.0.1',
'port':3306,
'user':'root',
'passwd':'654321',
'charset':'utf8',
'conn':10
}
]
}
}
install(database)
def printt(t=0):
now = time.time()
if t > 0:
print 'time:', now-t
return now
t = printt()
with get_connection('test') as conn:
t = printt(t)
print 'master:', conn._master, 'slave:', conn._slave
assert conn._master == None
assert conn._slave == None
ret = conn.query("select 10")
t = printt(t)
print 'after read master:', conn._master, 'slave:', conn._slave
assert conn._master == None
assert conn._slave != None
conn.execute('create table if not exists haha (id int(4) not null primary key, name varchar(128) not null)')
t = printt(t)
print 'master:', conn._master, 'slave:', conn._slave
assert conn._master != None
assert conn._slave != None
conn.execute('drop table haha')
t = printt(t)
assert conn._master != None
assert conn._slave != None
print 'ok'
print '=' * 20
t = printt()
with get_connection('test') as conn:
t = printt(t)
print 'master:', conn._master, 'slave:', conn._slave
assert conn._master == None
assert conn._slave == None
ret = conn.master.query("select 10")
assert conn._master != None
assert conn._slave == None
t = printt(t)
print 'after query master:', conn._master, 'slave:', conn._slave
ret = conn.query("select 10")
assert conn._master != None
assert conn._slave != None
print 'after query master:', conn._master, 'slave:', conn._slave
print 'ok'
if __name__ == '__main__':
#test_with()
#test5()
#time.sleep(50)
#pool = dbpool['test']
#test3()
#test4()
#test()
#test_base_func()
test_new_rw()
print 'complete!'
| [
"qfpay@TEST107.(none)"
] | qfpay@TEST107.(none) |
d0c6e4689bcc224f4fde752a7459a16adbb45cf1 | 2e263bb909bb706957990f23d4d07a33fe031a61 | /curiosity_debug.py | dffbbe8768dbdc709baeb1ed25319c6dda169585 | [] | no_license | liziniu/SuperMario | 3e9b776d4c490275b1684b4fd3b3471914811b7d | 64a33901c61591348dd4b9c878e396901dea27b6 | refs/heads/master | 2020-05-16T02:32:10.694180 | 2019-06-02T10:08:11 | 2019-06-02T10:08:11 | 182,632,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,193 | py | import matplotlib
import numpy as np
import tensorflow as tf
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import os
import argparse
import pickle
import json
from baselines.common import set_global_seeds
from curiosity.dynamics import Dynamics
from run import build_env
from scipy.stats import pearsonr
from common.util import DataRecorder
from baselines import logger
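# Thin trainer around the curiosity Dynamics module: optimizes the sum of the
# auxiliary-task loss and the forward-dynamics loss with RMSProp.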
class Model:
def __init__(self, sess, env, aux_task, feat_dim, lr):
self.sess = sess or tf.Session()
self.dynamics = Dynamics(sess=self.sess, env=env, auxiliary_task=aux_task, feat_dim=feat_dim,
queue_size=1000, normalize_novelty=True)
self.obs_shape = env.observation_space.shape
self.ac_shape = env.action_space.shape
del env
self.opt = tf.train.RMSPropOptimizer(lr, decay=0.99)
self.aux_loss = self.dynamics.aux_loss
self.dyna_loss = self.dynamics.dyna_loss
self.loss = self.aux_loss + self.dyna_loss
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradsandvars = self.opt.compute_gradients(self.loss, params)
self.train_op = self.opt.apply_gradients(gradsandvars)
self.train_history = []
def train(self, data, rollout_size, online=True, save_path=None):
"""
:param data: list of dict. [{"obs": arr; "next_obs": arr}]
"""
self.recoder = DataRecorder(os.path.join(save_path, "training"))
for episode, episode_data in enumerate(data):
episode_length = len(episode_data["obs"])
obs, act, next_obs, x_pos = episode_data["obs"], episode_data["act"], episode_data["next_obs"], episode_data["x_pos"]
episode_novelty = []
if not online:
ind = np.random.permutation(episode_length)
obs, act, next_obs, x_pos = obs[ind], act[ind], next_obs[ind], x_pos[ind]
for start in range(0, episode_length, rollout_size):
end = start + rollout_size
batch_obs, batch_act, batch_next_obs, batch_x_pos = obs[start:end], act[start:end], next_obs[start:end], x_pos[start:end]
novelty = self.sess.run(self.dynamics.novelty, feed_dict={self.dynamics.obs: obs,
self.dynamics.ac: act,
self.dynamics.next_obs: next_obs})
self.sess.run(self.train_op, feed_dict={self.dynamics.obs: batch_obs, self.dynamics.ac: batch_act,
self.dynamics.next_obs: batch_next_obs})
p = pearsonr(x_pos, novelty)[0]
logger.info("Episode:{}|Epoch:{}|P:{}".format(episode, start//rollout_size, p))
episode_novelty.append(novelty)
self.recoder.store({"x_pos": x_pos, "novelty": novelty, "episode": episode, "epoch": start//rollout_size,
"p": p})
plt.figure()
plt.scatter(x_pos, novelty)
# plt.yscale("log")
plt.savefig(os.path.join(save_path, "{}_{}.png".format(episode, start//rollout_size)))
plt.close()
self.recoder.dump()
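# Flatten the per-step records into per-episode numpy arrays and derive
# next_obs by shifting obs one step (dropping the final transition).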
def preprocess_data(data):
data_episode = []
for episode_data in data:
tmp = {"obs": [], "act": [], "x_pos": []}
for t in episode_data:
tmp["obs"].append(t["obs"][0])
tmp["act"].append(t["act"][0])
tmp["x_pos"].append(t["info"][0]["x_pos"])
tmp["obs"] = np.asarray(tmp["obs"], dtype=np.float32)
tmp["act"] = np.asarray(tmp["act"], dtype=np.float32)
tmp["x_pos"] = np.asarray(tmp["x_pos"], dtype=np.float32)
tmp["next_obs"] = np.copy(tmp["obs"][1:])
tmp["obs"] = tmp["obs"][:-1]
tmp["act"] = tmp["act"][:-1]
tmp["x_pos"] = tmp["x_pos"][:-1]
data_episode.append(tmp)
return data_episode
def visualize_p(path):
f = open(path, "rb")
data = []
while True:
try:
data.append(pickle.load(f))
except Exception as e:
print(e)
break
p = []
episode = None
for i in range(len(data)):
if episode is None:
episode = data[i]["episode"]
ep = data[i]["episode"]
if ep == episode:
p.append(data[i]["p"])
else:
plt.figure()
plt.plot(p)
            plt.savefig(os.path.join(os.path.dirname(path), "p_{}.png".format(episode)))  # path is the pickle file; save beside it
p = []
            p.append(data[i]["p"])  # first p value of the new episode
episode = ep
print("Epoch:{} done".format(ep))
def main(args):
f = open("{}/data.pkl".format(args.load_path), "rb")
data = []
while True:
try:
data.append(pickle.load(f))
except:
break
print("Episode:", len(data))
set_global_seeds(args.seed)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
sess = tf.Session(config=config, )
env = build_env(env_id=args.env, num_env=1, alg="acer", reward_scale=1.0, env_type=args.env_type,
gamestate=None, seed=None, prefix="")
model = Model(
sess=sess,
env=env,
aux_task=args.aux_task,
feat_dim=args.feat_dim,
lr=args.lr
)
sess.run(tf.global_variables_initializer())
save_path = "{}/plots-{}-{}-{}".format(
args.load_path,
args.memo,
args.online,
args.aux_task,
)
logger.configure(dir=save_path)
if not os.path.exists(save_path):
os.makedirs(save_path)
with open(os.path.join(save_path, "config.json"), "w") as f:
json.dump(args.__dict__, f, indent=4)
model.train(
preprocess_data(data),
rollout_size=args.rollout_size*args.num_env,
save_path=save_path,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--rollout_size", type=int, default=20)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--nb_opt", type=int, default=5)
parser.add_argument("--lr", type=float, default=7e-4)
parser.add_argument("--memo", type=str, default="")
parser.add_argument("--online", action="store_true", default=False)
parser.add_argument("--aux_task", type=str, default="RF", choices=["RF", "RND", "IDF"])
parser.add_argument("--feat_dim", type=int, default=512)
parser.add_argument("--dyna_dim", type=int, default=512)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--load_path", type=str, default="logs")
parser.add_argument('--gamestate', default=None)
parser.add_argument("--alg", type=str, default="ppo2")
parser.add_argument("--env_type", type=str, default="atari")
parser.add_argument("--env", type=str)
parser.add_argument("--num_env", type=int, default=1)
parser.add_argument("--reward_scale", type=float, default=1.0, choices=[1.0])
args = parser.parse_args()
main(args) | [
"[email protected]"
] | |
8eafd7e316029d9719d71cf80f9817845735177a | eb85c96c3783be407b396956c13448d89f5e5fee | /design_patterns_with_python/2-python-design-patterns-m2-exercise-files/Strategy/StrategyVariation/strategy_variation/shipping_cost.py | 39dd4ddcb717c4d486e40cddd906eba548af071d | [] | no_license | ForeverDreamer/python_learning | 83c2c290271dbf060ee1718140b8dfd128b82b20 | ff905c4811ddb688f8ee44aed8c4d8067db6168b | refs/heads/master | 2022-04-30T03:23:45.162498 | 2019-07-05T07:55:01 | 2019-07-05T07:55:01 | 181,037,513 | 1 | 0 | null | 2022-04-22T21:12:46 | 2019-04-12T15:41:44 | Jupyter Notebook | UTF-8 | Python | false | false | 170 | py | class ShippingCost(object):
def __init__(self, strategy):
self._strategy = strategy
def shipping_cost(self, order):
return self._strategy(order)
| [
"[email protected]"
] | |
8390cd4854229dfee6b142cd9d5bbad654885cf3 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_223/ch87_2020_04_29_13_42_03_524040.py | 51d2d5546b1465b3bf43fb948f7677c962d2a02a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | with open ('churras.txt', 'r') as churrastxt:
linhas = churrastxt.readlines()
print (linhas)
custo = 0
for linha in linhas:
    separa = linha.split(',')
    # each line is: item,quantity,unit price
    e1 = float(separa[1])
    e2 = float(separa[2])
    custo += e1 * e2
print (custo) | [
"[email protected]"
] | |
b4a637358f8fd61fff627c1eb8da57d3effb6445 | 8b942cbd6a0da0a61f68c468956ba318c7f1603d | /sortings/0056_merge_intervals.py | 9ebfcf815fa3d6974a6efdde33f440b51204ad67 | [
"MIT"
] | permissive | MartinMa28/Algorithms_review | 080bd608b0e0c6f39c45f28402e5181791af4766 | 3f2297038c00f5a560941360ca702e6868530f34 | refs/heads/master | 2022-04-13T03:56:56.932788 | 2020-04-06T03:41:33 | 2020-04-06T03:41:33 | 203,349,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | class Solution:
def merge(self, intervals: list) -> list:
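        # after sorting by start, repeatedly fold each interval into its
        # successor while they overlap (list.pop makes this O(n^2) worst case)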
sorted_intervals = sorted(intervals)
idx = 0
while idx < len(sorted_intervals) - 1:
start, end = sorted_intervals[idx]
next_start, next_end = sorted_intervals[idx + 1]
if end >= next_start:
# overlaps with next range
sorted_intervals.pop(idx)
if end < next_end:
sorted_intervals[idx] = (start, next_end)
else:
sorted_intervals[idx] = (start, end)
else:
# if does not overlap, check the next range
idx += 1
return sorted_intervals
if __name__ == "__main__":
solu = Solution()
print(solu.merge([[1,3],[2,6],[8,10],[15,18]])) | [
"[email protected]"
] | |
85a9c7cc2c8dbd8ce1eda62b1dbc50f5b32114df | bb6ebff7a7f6140903d37905c350954ff6599091 | /tools/clang/blink_gc_plugin/process-graph.py | 2bff96f76ad6c8f3698738e85c74f29c3a901e19 | [
"BSD-3-Clause",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 13,211 | py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse, os, sys, json, subprocess, pickle, StringIO
parser = argparse.ArgumentParser(
description =
"Process the Blink points-to graph generated by the Blink GC plugin.")
parser.add_argument(
'-', dest='use_stdin', action='store_true',
help='Read JSON graph files from stdin')
parser.add_argument(
'-c', '--detect-cycles', action='store_true',
help='Detect cycles containing GC roots')
parser.add_argument(
'-s', '--print-stats', action='store_true',
help='Statistics about ref-counted and traced objects')
parser.add_argument(
'-v', '--verbose', action='store_true',
help='Verbose output')
parser.add_argument(
'--ignore-cycles', default=None, metavar='FILE',
help='File with cycles to ignore')
parser.add_argument(
'--ignore-classes', nargs='*', default=[], metavar='CLASS',
help='Classes to ignore when detecting cycles')
parser.add_argument(
'--pickle-graph', default=None, metavar='FILE',
help='File to read/save the graph from/to')
parser.add_argument(
'files', metavar='FILE_OR_DIR', nargs='*', default=[],
help='JSON graph files or directories containing them')
# Command line args after parsing.
args = None
# Map from node labels to nodes.
graph = {}
# Set of root nodes.
roots = []
# List of cycles to ignore.
ignored_cycles = []
# Global flag to determine exit code.
global_reported_error = False
def set_reported_error(value):
global global_reported_error
global_reported_error = value
def reported_error():
return global_reported_error
def log(msg):
if args.verbose:
print msg
global_inc_copy = 0
def inc_copy():
global global_inc_copy
global_inc_copy += 1
def get_node(name):
return graph.setdefault(name, Node(name))
ptr_types = ('raw', 'ref', 'mem')
def inc_ptr(dst, ptr):
if ptr in ptr_types:
node = graph.get(dst)
if not node: return
node.counts[ptr] += 1
def add_counts(s1, s2):
for (k, v) in s2.iteritems():
s1[k] += s2[k]
# Representation of graph nodes. Basically a map of directed edges.
class Node:
def __init__(self, name):
self.name = name
self.edges = {}
self.reset()
def __repr__(self):
return "%s(%s) %s" % (self.name, self.visited, self.edges)
def update_node(self, decl):
# Currently we don't track any node info besides its edges.
pass
def update_edge(self, e):
new_edge = Edge(**e)
edge = self.edges.get(new_edge.key)
if edge:
# If an edge exist, its kind is the strongest of the two.
edge.kind = max(edge.kind, new_edge.kind)
else:
self.edges[new_edge.key] = new_edge
def super_edges(self):
return [ e for e in self.edges.itervalues() if e.is_super() ]
def subclass_edges(self):
return [ e for e in self.edges.itervalues() if e.is_subclass() ]
def reset(self):
self.cost = sys.maxint
self.visited = False
self.path = None
self.counts = {}
for ptr in ptr_types:
self.counts[ptr] = 0
def update_counts(self):
for e in self.edges.itervalues():
inc_ptr(e.dst, e.ptr)
# Representation of directed graph edges.
class Edge:
def __init__(self, **decl):
self.src = decl['src']
self.dst = decl['dst']
self.lbl = decl['lbl']
self.ptr = decl['ptr']
self.kind = decl['kind'] # 0 = weak, 1 = strong, 2 = root
self.loc = decl['loc']
# The label does not uniquely determine an edge from a node. We
# define the semi-unique key to be the concatenation of the
# label and dst name. This is sufficient to track the strongest
# edge to a particular type. For example, if the field A::m_f
# has type HashMap<WeakMember<B>, Member<B>> we will have a
# strong edge with key m_f#B from A to B.
self.key = '%s#%s' % (self.lbl, self.dst)
def __repr__(self):
return '%s (%s) => %s' % (self.src, self.lbl, self.dst)
def is_root(self):
return self.kind == 2
def is_weak(self):
return self.kind == 0
def keeps_alive(self):
return self.kind > 0
def is_subclass(self):
return self.lbl.startswith('<subclass>')
def is_super(self):
return self.lbl.startswith('<super>')
def parse_file(filename):
obj = json.load(open(filename))
return obj
def build_graphs_in_dir(dirname):
  # TODO: Use platform independent code, eg, os.walk
files = subprocess.check_output(
['find', dirname, '-name', '*.graph.json']).split('\n')
log("Found %d files" % len(files))
for f in files:
f.strip()
if len(f) < 1:
continue
build_graph(f)
def build_graph(filename):
for decl in parse_file(filename):
if decl.has_key('name'):
# Add/update a node entry
name = decl['name']
node = get_node(name)
node.update_node(decl)
else:
# Add/update an edge entry
name = decl['src']
node = get_node(name)
node.update_edge(decl)
# Copy all non-weak edges from super classes to their subclasses.
# This causes all fields of a super to be considered fields of a
# derived class without transitively relating derived classes with
# each other. For example, if B <: A, C <: A, and for some D, D => B,
# we don't want that to entail that D => C.
def copy_super_edges(edge):
if edge.is_weak() or not edge.is_super():
return
inc_copy()
# Make the super-class edge weak (prohibits processing twice).
edge.kind = 0
# If the super class is not in our graph exit early.
super_node = graph.get(edge.dst)
if super_node is None: return
# Recursively copy all super-class edges.
for e in super_node.super_edges():
copy_super_edges(e)
# Copy strong super-class edges (ignoring sub-class edges) to the sub class.
sub_node = graph[edge.src]
for e in super_node.edges.itervalues():
if e.keeps_alive() and not e.is_subclass():
new_edge = Edge(
src = sub_node.name,
dst = e.dst,
lbl = '%s <: %s' % (super_node.name, e.lbl),
ptr = e.ptr,
kind = e.kind,
loc = e.loc,
)
sub_node.edges[new_edge.key] = new_edge
# Add a strong sub-class edge.
sub_edge = Edge(
src = super_node.name,
dst = sub_node.name,
lbl = '<subclass>',
ptr = edge.ptr,
kind = 1,
loc = edge.loc,
)
super_node.edges[sub_edge.key] = sub_edge
def complete_graph():
for node in graph.itervalues():
for edge in node.super_edges():
copy_super_edges(edge)
for edge in node.edges.itervalues():
if edge.is_root():
roots.append(edge)
log("Copied edges down <super> edges for %d graph nodes" % global_inc_copy)
def reset_graph():
for n in graph.itervalues():
n.reset()
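# Uniform-cost search (Dijkstra with unit edge weights) over keeps-alive edges;
# leaves the path in dst.path links so report_cycle can walk it back.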
def shortest_path(start, end):
start.cost = 0
minlist = [start]
while len(minlist) > 0:
minlist.sort(key=lambda n: -n.cost)
current = minlist.pop()
current.visited = True
if current == end or current.cost >= end.cost + 1:
return
for e in current.edges.itervalues():
if not e.keeps_alive():
continue
dst = graph.get(e.dst)
if dst is None or dst.visited:
continue
if current.cost < dst.cost:
dst.cost = current.cost + 1
dst.path = e
minlist.append(dst)
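# For each GC-root edge, check whether the rooted object can reach back to the
# object that holds the root; such a cycle keeps itself alive and leaks.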
def detect_cycles():
for root_edge in roots:
reset_graph()
# Mark ignored classes as already visited
for ignore in args.ignore_classes:
name = ignore.find("::") > 0 and ignore or ("WebCore::" + ignore)
node = graph.get(name)
if node:
node.visited = True
src = graph[root_edge.src]
dst = graph.get(root_edge.dst)
if src.visited:
continue
if root_edge.dst == "WTF::String":
continue
if dst is None:
print "\nPersistent root to incomplete destination object:"
print root_edge
set_reported_error(True)
continue
# Find the shortest path from the root target (dst) to its host (src)
shortest_path(dst, src)
if src.cost < sys.maxint:
report_cycle(root_edge)
def is_ignored_cycle(cycle):
for block in ignored_cycles:
if block_match(cycle, block):
return True
def block_match(b1, b2):
if len(b1) != len(b2):
return False
for (l1, l2) in zip(b1, b2):
if l1 != l2:
return False
return True
def report_cycle(root_edge):
dst = graph[root_edge.dst]
path = []
edge = root_edge
dst.path = None
while edge:
path.append(edge)
edge = graph[edge.src].path
path.append(root_edge)
path.reverse()
# Find the max loc length for pretty printing.
max_loc = 0
for p in path:
if len(p.loc) > max_loc:
max_loc = len(p.loc)
out = StringIO.StringIO()
for p in path[:-1]:
print >>out, (p.loc + ':').ljust(max_loc + 1), p
sout = out.getvalue()
if not is_ignored_cycle(sout):
print "\nFound a potentially leaking cycle starting from a GC root:\n", sout
set_reported_error(True)
def load_graph():
global graph
global roots
log("Reading graph from pickled file: " + args.pickle_graph)
dump = pickle.load(open(args.pickle_graph, 'rb'))
graph = dump[0]
roots = dump[1]
def save_graph():
log("Saving graph to pickle file: " + args.pickle_graph)
dump = (graph, roots)
pickle.dump(dump, open(args.pickle_graph, 'wb'))
def read_ignored_cycles():
global ignored_cycles
if not args.ignore_cycles:
return
log("Reading ignored cycles from file: " + args.ignore_cycles)
block = []
for l in open(args.ignore_cycles):
line = l.strip()
if not line or line.startswith('Found'):
if len(block) > 0:
ignored_cycles.append(block)
block = []
else:
block += l
if len(block) > 0:
ignored_cycles.append(block)
gc_bases = (
'WebCore::GarbageCollected',
'WebCore::GarbageCollectedFinalized',
'WebCore::GarbageCollectedMixin',
)
ref_bases = (
'WTF::RefCounted',
'WTF::ThreadSafeRefCounted',
)
gcref_bases = (
'WebCore::RefCountedGarbageCollected',
'WebCore::ThreadSafeRefCountedGarbageCollected',
)
ref_mixins = (
'WebCore::EventTarget',
'WebCore::EventTargetWithInlineData',
'WebCore::ActiveDOMObject',
)
def print_stats():
gcref_managed = []
ref_managed = []
gc_managed = []
hierarchies = []
for node in graph.itervalues():
node.update_counts()
for sup in node.super_edges():
if sup.dst in gcref_bases:
gcref_managed.append(node)
elif sup.dst in ref_bases:
ref_managed.append(node)
elif sup.dst in gc_bases:
gc_managed.append(node)
groups = [("GC manged ", gc_managed),
("ref counted ", ref_managed),
("in transition", gcref_managed)]
total = sum([len(g) for (s,g) in groups])
for (s, g) in groups:
percent = len(g) * 100 / total
print "%2d%% is %s (%d hierarchies)" % (percent, s, len(g))
for base in gcref_managed:
stats = dict({ 'classes': 0, 'ref-mixins': 0 })
for ptr in ptr_types: stats[ptr] = 0
hierarchy_stats(base, stats)
hierarchies.append((base, stats))
print "\nHierarchies in transition (RefCountedGarbageCollected):"
hierarchies.sort(key=lambda (n,s): -s['classes'])
for (node, stats) in hierarchies:
total = stats['mem'] + stats['ref'] + stats['raw']
print (
"%s %3d%% of %-30s: %3d cls, %3d mem, %3d ref, %3d raw, %3d ref-mixins" %
(stats['ref'] == 0 and stats['ref-mixins'] == 0 and "*" or " ",
total == 0 and 100 or stats['mem'] * 100 / total,
node.name.replace('WebCore::', ''),
stats['classes'],
stats['mem'],
stats['ref'],
stats['raw'],
stats['ref-mixins'],
))
def hierarchy_stats(node, stats):
if not node: return
stats['classes'] += 1
add_counts(stats, node.counts)
for edge in node.super_edges():
if edge.dst in ref_mixins:
stats['ref-mixins'] += 1
for edge in node.subclass_edges():
hierarchy_stats(graph.get(edge.dst), stats)
def main():
global args
args = parser.parse_args()
if not (args.detect_cycles or args.print_stats):
print "Please select an operation to perform (eg, -c to detect cycles)"
parser.print_help()
return 1
if args.pickle_graph and os.path.isfile(args.pickle_graph):
load_graph()
else:
if args.use_stdin:
log("Reading files from stdin")
for f in sys.stdin:
build_graph(f.strip())
else:
log("Reading files and directories from command line")
if len(args.files) == 0:
print "Please provide files or directores for building the graph"
parser.print_help()
return 1
for f in args.files:
if os.path.isdir(f):
log("Building graph from files in directory: " + f)
build_graphs_in_dir(f)
else:
log("Building graph from file: " + f)
build_graph(f)
log("Completing graph construction (%d graph nodes)" % len(graph))
complete_graph()
if args.pickle_graph:
save_graph()
if args.detect_cycles:
read_ignored_cycles()
log("Detecting cycles containg GC roots")
detect_cycles()
if args.print_stats:
log("Printing statistics")
print_stats()
if reported_error():
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
5261b1fa72f0205b52d000597183839bd1f223ff | 2c7e967b5cb7c911245463ae1cd152b25b5e0b89 | /steps/google.py | 54cefc195ca13018e3b6ebd16626e1f29e1f6c1f | [
"MIT"
] | permissive | antonckoenig/citeas-api | 902d725c59dad9292c68f873d3a3512c77ceb06e | 9a0da10fad95b49363ef43c4d02be1dcb17169d6 | refs/heads/master | 2023-07-30T13:08:39.967004 | 2021-09-19T20:08:13 | 2021-09-19T20:08:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | from googlesearch import get_random_user_agent, search
from steps.arxiv import ArxivResponseStep
from steps.bitbucket import BitbucketRepoStep
from steps.cran import CranLibraryStep
from steps.core import Step
from steps.github import GithubRepoStep
from steps.pypi import PypiLibraryStep
from steps.webpage import WebpageStep
class GoogleStep(Step):
step_intro = "Use Google to find the software citation."
step_more = "This project webpage often includes attribution information like an associated DOI, GitHub repository, and/or project title."
@property
def starting_children(self):
return [
ArxivResponseStep,
GithubRepoStep,
BitbucketRepoStep,
CranLibraryStep,
PypiLibraryStep,
WebpageStep,
]
def set_content_url(self, input):
if "http" in input:
return None
self.content_url = self.google_search(input)
def set_content(self, input):
self.content = self.content_url
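    # Googles the query with a randomized user agent and returns the first
    # result that is neither citebay.com nor a PDF link.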
@staticmethod
def google_search(input):
random_user_agent = get_random_user_agent()
# check if input is PMID
if len(input) == 8 and input.isdigit():
query = input
elif "scipy" in input:
query = "scipy citation"
else:
query = "{} software citation".format(input)
for url in search(query, stop=3, user_agent=random_user_agent):
if "citebay.com" not in url and not url.endswith(".pdf"):
return url
| [
"[email protected]"
] | |
97f7334a5bc7c96d20dfd8b287e3555d16fb9d1d | 541fa581db6368486605fb8b622196e6fb9fc45f | /backend/manage.py | 7b40c4858ed128a63f6a08a7f209ec4b57e713cd | [] | no_license | crowdbotics-apps/speakup-radio-19595 | a8c92dd5b343cfaf95897108215feb3775f66ccf | cf97afc333feb0e81e0f8d2e4115ce983d66589f | refs/heads/master | 2022-12-03T03:05:29.785944 | 2020-08-16T16:15:05 | 2020-08-16T16:15:05 | 287,979,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'speakup_radio_19595.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d755209ee2ddbde3f348bfcc20afc00768056f8e | d2024f10e641ab2f28a888d23071edc032299498 | /demo04062018/object_rec_camera_pointing_v3_ts.py | 1ce5bae4c0cba1cb4c90475cc91a28de52d3016d | [] | no_license | chen116/demo2018 | 6f2ae07150182b8e14a2eacbc57bdc79c03e6dee | d289545bcc30445be26e1381d5301d8f657d0c6e | refs/heads/master | 2021-04-27T13:11:44.742650 | 2018-07-14T14:38:37 | 2018-07-14T14:38:37 | 122,435,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,290 | py | # USAGE
# python real_time_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
from foscam_v3 import FoscamCamera
import numpy as np
import argparse
import imutils
import time
import cv2
import os
import socket
import sys
import threading
import copy
from imutils.video import FileVideoStream
import threading
from queue import Queue
# setup GUI
if True:
from tkinter import *
master = Tk()
w = 1000 # width for the Tk root
h = 50 # height for the Tk root
# get screen width and height
ws = master.winfo_screenwidth() # width of the screen
hs = master.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = (ws/2) - (w/2)
y = (hs)-h*2
# set the dimensions of the screen
# and where it is placed
master.geometry('%dx%d+%d+%d' % (w, h, x, y))
sched_var = StringVar()
sched_var.set(" || "+sys.argv[7]+" ||")
fg ="green"
if "RT" in sys.argv[7]:
fg = "blue"
sched_label = Label(master, textvariable=sched_var,fg = fg,bg = "white",font = "Verdana 30 bold" )
sched_label.pack(side=LEFT)
# scheds = [
# ("Credit", 0),
# ("RT-Xen", 1)
# ]
# sched = IntVar()
# sched.set(0) # initialize
# previous_sched = sched.get()
# for text, mode in scheds:
# b = Radiobutton(master, text=text,variable=sched, value=mode)
# b.pack(side=LEFT)
# checked = IntVar(value=0)
# previous_checked = checked.get()
# c = Checkbutton(master, text="Anchors | ", variable=checked,font = "Verdana 14 bold" )
# c.pack(side=LEFT)
anchors_var = StringVar()
anchors_var.set("Resource:")
anchors_label = Label(master, textvariable=anchors_var,font = "Verdana 10 bold" )
anchors_label.pack(side=LEFT)
anchors_options = [
("Anchors", 1),
("50%",0),
("100%", 2),
]
checked = IntVar()
checked.set(0) # initialize
previous_checked = checked.get()
for text, mode in anchors_options:
b = Radiobutton(master, text=text,variable=checked, value=mode)
b.pack(side=LEFT)
frame_var = StringVar()
frame_var.set("Frame:")
frame_label = Label(master, textvariable=frame_var,font = "Verdana 10 bold" )
frame_label.pack(side=LEFT)
FSIZE = [
("S", 300),
("M", 600),
("L", 800)
]
w1 = IntVar()
w1.set(600) # initialize
previous_f_size = w1.get()
for text, mode in FSIZE:
b = Radiobutton(master, text=text,variable=w1, value=mode)
b.pack(side=LEFT)
timeslice_var = StringVar()
timeslice_var.set(" | ")
timeslice_label = Label(master, textvariable=timeslice_var,font = "Verdana 14 bold" )
timeslice_label.pack(side=LEFT)
tsSIZE = [
("Low-lat", 15),
("High-thru", 30)
]
ts1 = IntVar()
ts1.set(15) # initialize
previous_ts = ts1.get()
for text, mode in tsSIZE:
b = Radiobutton(master, text=text,variable=ts1, value=mode)
b.pack(side=LEFT)
# ts1 = Scale(master,from_=15,to=30,orient=HORIZONTAL)
# ts1.set(15) # init speed
# previous_ts = ts1.get()
# ts1.pack(side=LEFT)
def exit_app(w1):
w1.set(0)
done = Button(master, text="EXIT",command=lambda: exit_app(w1))
done.pack(side=LEFT)
# anchors_var = StringVar()
# anchors_var.set("Meow")
# anchors_label = Label(master, textvariable=anchors_var)
# anchors_label.pack(side=BOTTOM)
m1 = Scale(master,from_=1,to=20,orient=HORIZONTAL)
m1.set(5) # init speed
# m1.pack(side=LEFT)
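# Worker threads: each owns its own Caffe DNN instance, pulls frame blobs from
# input_q and runs the forward pass only on every n-th frame; skipped frames
# emit a -1 sentinel so the consumer can reuse the previous detections.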
class Workers(threading.Thread):
def __init__(self,threadLock,every_n_frame,thread_id,input_q,output_q):
threading.Thread.__init__(self)
self.net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt.txt", "MobileNetSSD_deploy.caffemodel")
self.thread_id=thread_id
self.input_q=input_q
self.output_q=output_q
self.every_n_frame=every_n_frame
self.n=every_n_frame['n']
self.threadLock=threadLock
self.my_every_n_frame_cnt=0
def run(self):
# Acquire lock to synchronize thread
# self.threadLock.acquire()
while True:
self.threadLock.acquire()
self.n = self.every_n_frame['n']
# self.every_n_frame['cnt']=(self.every_n_frame['cnt']+1)%self.n
# self.my_every_n_frame_cnt = self.every_n_frame['cnt']
self.threadLock.release()
if self.n==-1:
# self.output_q.put({'cnt':-1})
break
# blob = self.input_q.get()
stuff = self.input_q.get()
if stuff['cnt']==-1:
self.output_q.put({'cnt':-1})
break
# self.n = stuff['n']
self.my_every_n_frame_cnt = stuff['cnt']
blob = stuff['blob']
if self.my_every_n_frame_cnt%self.n==0:
self.net.setInput(blob)
#print("--------------------thread:",self.thread_id," gonna dnn", "cnt:",self.my_every_n_frame_cnt,'n:',self.n)
# self.output_q.put(self.net.forward())
# self.output_q.put({'blob':self.net.forward(),'cnt':stuff['cnt']})
net_result=self.net.forward()
# self.output_q.put({'blob':net_result,'cnt':stuff['cnt']})
self.output_q.put({'blob':net_result,'cnt':stuff['cnt']})
else:
# self.output_q.put({'blob':-1*np.ones((1,1,1,2)),'cnt':stuff['cnt']})
self.output_q.put({'blob':-1*np.ones((1,1,1,2)),'cnt':stuff['cnt']})
# self.output_q.put(np.ndarray([0]))
# Release lock for the next thread
# self.threadLock.release()
#print("Exiting thread" , self.thread_id)
input_q = Queue() # fps is better if queue is higher but then more lags
output_q = Queue()
threads = []
every_n_frame = {'cnt':-1,'n':m1.get()}
threadLock = threading.Lock()
total_num_threads = 5
num_threads_exiting = 0
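# control-channel server: the peer node reports 'found_object'/'lost_object'/
# 'clean_up', which we mirror into the global remotetrack flag (1/0/-1).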
def start_server():
global remotetrack
remotetrack = 0
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host = socket.gethostname()
s.bind((host,int(sys.argv[6])))
s.listen(5)
#print('server started')
while True:
connection, address = s.accept()
while True:
data = connection.recv(64)
if (len(data)>0):
msg = data.decode('utf-8')
if (msg == 'found_object'):
#print('remote node found object')
remotetrack = 1
elif (msg=='lost_object'):
#print('remote node lost object')
remotetrack = 0
elif (msg=='clean_up'):
#print('cleanup from other node')
remotetrack = -1
if not data:
break
connection.sendall(data)
remotetrack = -1
connection.close()
remotetrack = -1
mycam = FoscamCamera(sys.argv[1],88,sys.argv[2],sys.argv[3],daemon=False)
moveright = 0
moveleft = 0
global remotetrack
localtrack = 0
remotetrack = 0
localsearch = 0
sentfoundmessage = 0
sentlostmessage = 0
centered = 1
#sock_client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#sock_client.connect((sys.argv[4],int(sys.argv[5])))
thread = threading.Thread(target = start_server)
thread.daemon = True
thread.start()
sock_client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
tempFlag=None
while tempFlag is None:
try:
sock_client.connect((sys.argv[4],int(sys.argv[5])))
tempFlag=1
except:
#print("Waiting for other host")
time.sleep(1)
pass
#setup CAM
# construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-p", "--prototxt", required=True,
# help="path to Caffe 'deploy' prototxt file")
#ap.add_argument("-m", "--model", required=True,
# help="path to Caffe pre-trained model")
#ap.add_argument("-c", "--confidence", type=float, default=0.2,
# help="minimum probability to filter weak detections")
#args = vars(ap.parse_args())
#os.system('python reset_cam.py')
mycam.ptz_reset()
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
#CLASSES = ["person"]
L=0.3
R=0.7
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
canpoint = 1
# load our serialized model from disk
# #print("[INFO] loading model...")
# prototxt = 'MobileNetSSD_deploy.prototxt.txt'
# model = 'MobileNetSSD_deploy.caffemodel'
# net = cv2.dnn.readNetFromCaffe(prototxt, model)
personincam = 0
# initialize the video stream, allow the cammera sensor to warmup,
# and initialize the FPS counter
#print("[INFO] starting video stream...")
#vs = VideoStream('rtsp://arittenbach:[email protected]:88/videoMain').start()
vs = VideoStream('rtsp://'+sys.argv[2]+':'+sys.argv[3]+'@'+sys.argv[1]+':88/videoMain').start() # realvid
# vs= FileVideoStream("../walkcat.mp4").start() # outvid
tracking_target = "person" # realvid
# tracking_target = "cat" # outvid
time.sleep(2.0)
# cat_frame = vs.read() # outvid
# for x in range(10): # outvid
# cat_frame = vs.read() # outvid
# setup mulithreads
for i in range(total_num_threads):
tmp_thread = Workers(threadLock,every_n_frame,i,input_q,output_q)
tmp_thread.start()
threads.append(tmp_thread)
# prev_box = {}
prev_boxes = []
cnt=0
global_cnt=0
import heartbeat
window_size_hr=5
hb = heartbeat.Heartbeat(1024,window_size_hr,100,"vic.log",10,100)
monitoring_items = ["heart_rate","app_mode","frame_size","timeslice"]
comm = heartbeat.DomU(monitoring_items)
fps = FPS().start()
pointat = 0
# loop over the frames from the video stream
prev_personincam = personincam
# while vs.more(): # outvid
while True: # realvid
frame = vs.read()
if frame is not None:
# frame = cat_frame # outvid
current_f_size=w1.get()
if remotetrack == -1 or current_f_size == 0:
threadLock.acquire()
every_n_frame['n']=-1
threadLock.release()
while not input_q.empty():
x=input_q.get()
for i in range(total_num_threads):
input_q.put({'cnt':-1})
break
if current_f_size > 0:
frame = imutils.resize(frame, width=current_f_size)
# grab the frame dimensions and convert it to a blob
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
0.007843, (300, 300), 127.5)
threadLock.acquire()
every_n_frame['n']=m1.get()
threadLock.release()
stuff={'blob':blob,'cnt':cnt,'n':m1.get()}
cnt+=1
input_q.put(stuff)
if not output_q.empty():
stuff = output_q.get()
detections = stuff['blob']
order = stuff['cnt']
#print('output cnt:',order,'global cnt:',global_cnt)
global_cnt+=1
if detections[0][0][0][0] == -1:
if len(prev_boxes)>0:
for prev_box in prev_boxes:
startX=prev_box['startX']
startY=prev_box['startY']
endX=prev_box['endX']
endY=prev_box['endY']
idx=prev_box['idx']
label=prev_box['label']
cv2.rectangle(frame, (startX, startY), (endX, endY),
COLORS[idx], 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
else:
prev_boxes=[]
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
idx2 = int(detections[0,0,i,1])
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if ((confidence > 0.2) and (CLASSES[idx2]==tracking_target)):
# extract the index of the class label from the
# `detections`, then compute the (x, y)-coordinates of
# the bounding box for the object
# #print('catttttttttttttttttt')
idx = int(detections[0, 0, i, 1])
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# #print('startX=',startX)
# #print('endX=',endX)
if(((startX+endX)/2<(L*w)) and (moveleft==0)):
mycam.ptz_move_left()
moveleft = 1
moveright = 0
# canpoint = 0
# pointat = time.time()+0.3
elif(((startX+endX)/2>(R*w)) and (moveright==0)):
mycam.ptz_move_right()
moveright = 1
moveleft = 0
# canpoint = 0
# pointat = time.time()+0.3
# draw the prediction on the frame
elif((((startX+endX)/2>(L*w)) and (((startX+endX)/2)<(R*w))))and((moveright==1)or(moveleft==1)):
mycam.ptz_stop_run()
moveright = 0
moveleft = 0
label = "{}: {:.2f}%".format(CLASSES[idx],
confidence * 100)
cv2.rectangle(frame, (startX, startY), (endX, endY),
COLORS[idx], 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
prev_box = {}
prev_box['startX']=startX
prev_box['startY']=startY
prev_box['endX']=endX
prev_box['endY']=endY
prev_box['idx']=idx
prev_box['label']= "recalculating..."
prev_boxes.append(prev_box)
localtrack = 1
localsearch = 0
sentlostmessage = 0
centered = 0
myvec = detections[0,0,:,1]
if myvec[0]!=-1:
if CLASSES.index(tracking_target) in myvec:
personincam = 1
prev_personincam=1
else:
#print('about to send lost message')
personincam = 0
prev_personincam = 0
localtrack = 0
sock_client.send(bytes('lost_object','UTF-8'))
sentlostmessage = 1
else:
personincam = prev_personincam
if personincam == 0:
#print('about to send lost message')
personincam = 0
prev_personincam = 0
localtrack = 0
sock_client.send(bytes('lost_object','UTF-8'))
sentlostmessage = 1
if ((localsearch == 0) and (localtrack == 0) and (remotetrack == 1) and (personincam==0)):
#print('about to start cruise')
mycam.start_cruise('mycruise')
localsearch = 1
localtrack = 0
centered = 0
if ((localtrack == 0) and (remotetrack ==0) and (centered == 0) and (personincam==0)):
#print('about to reset cam')
mycam.ptz_reset()
centered = 1
localsearch = 0
localtrack = 0
sentfoundmessage = 0
#elif ((confidence < 0.2) and (CLASSES[idx2]=='person') and (localsearch==0) and (remotetrack == 1) and (localtrack == 0)):
# #print('about to start cruise')
# mycam.start_horizontal_cruise()
# localsearch = 1
# localtrack = 0
#elif ((confidence < 0.2) and (CLASSES[idx2]=='person') and (localsearch==0) and (remotetrack == 0) and (centered==0)):
# #print('about tor reset cam')
# mycam.ptz_reset()
# centered = 1
# localsearch = 0
# localtrack = 0
# sock_client.send(bytes('lost_object','UTF-8'))
# show the output frame
cv2.imshow("Frame", frame)
# hb stuff
# #print("hb: before heartbeat_beat()")
hb.heartbeat_beat()
# #print("hb: before get_window_heartrate()")
window_hr = hb.get_window_heartrate()
# #print("hb: before get_instant_heartrate()")
# instant_hr = hb.get_instant_heartrate()
# #print("hb: after hb stuff")
if global_cnt>window_size_hr:
comm.write("heart_rate",window_hr)
# #print('------------------window_hr:',window_hr)
# #print('instant_hr:',instant_hr)
current_checked = checked.get()
if previous_checked!=current_checked:
comm.write("app_mode",current_checked)
previous_checked=current_checked
if previous_f_size!=current_f_size:
comm.write("frame_size",current_f_size)
previous_f_size=current_f_size
current_ts=ts1.get()
if previous_ts!=current_ts:
comm.write("timeslice",current_ts)
previous_ts=current_ts
# current_sched = sched.get()
# if previous_sched!=current_sched:
# comm.write("sched",current_sched)
# previous_sched=current_sched
fps.update()
master.update_idletasks()
master.update()
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# # update the FPS counter
# fps.update()
# master.update_idletasks()
# master.update()
#if(time.time()>pointat):
# canpoint = 1
#print('localsearch = ',localsearch)
#print('remotetrack = ',remotetrack)
#print('localtrack = ',localtrack)
#print('personincam =',personincam)
#print('sentfoundmessage = ',sentfoundmessage)
#print('sentlostmessage = ',sentlostmessage)
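    # sentfoundmessage is cleared every iteration, so 'found_object' is
    # re-sent each frame while the person stays in view (keeps the peer fresh)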
sentfoundmessage = 0
if ((personincam==1) and (sentfoundmessage==0)):
#print('about to send found message')
sock_client.send(bytes('found_object','UTF-8'))
sentfoundmessage = 1
# stop the timer and display FPS information
fps.stop()
#print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
#print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
# hb clean up
hb.heartbeat_finish()
comm.write("heart_rate","done")
# worker threads clean up
threadLock.acquire()
every_n_frame['n']=-1
threadLock.release()
while not input_q.empty():
x=input_q.get()
for i in range(total_num_threads):
input_q.put({'cnt':-1})
for t in threads:
t.join()
#print("worker threads cleaned up")
# mycam1 = FoscamCamera('65.114.169.154',88,'arittenbach','8mmhamcgt16!')
# mycam2 = FoscamCamera('65.114.169.108',88,'admin','admin')
mycam1 = FoscamCamera('65.114.169.139',88,'arittenbach','8mmhamcgt16!')
mycam2 = FoscamCamera('65.114.169.151',88,'admin','admin')
mycam1.ptz_reset()
mycam2.ptz_reset()
mycam1.set_ptz_speed(4)
mycam2.set_ptz_speed(4)
if remotetrack!=-1:
sock_client.send(bytes('clean_up','UTF-8'))
| [
"[email protected]"
] | |
b7e68212cd709d9a98d2c452268db90ad47392ae | 871d2a367e45164f21ecdbefe52bf442b563b33c | /tests/tests/correctness/EPLAnalytics/Utilities/DataSimulator/sim_cor_030/run.py | df3cb6f159e3069ad3abecc6e54e13c8a54fb953 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | SoftwareAG/apama-industry-analytics-kit | c0f6c30badf31411a29bc6daa4a7125b76f4e737 | a3f6039915501d41251b6f7ec41b0cb8111baf7b | refs/heads/master | 2022-02-19T20:47:27.180233 | 2022-02-02T12:58:23 | 2022-02-02T12:58:23 | 185,572,282 | 3 | 2 | Apache-2.0 | 2022-02-02T12:58:24 | 2019-05-08T09:14:07 | Python | UTF-8 | Python | false | false | 3,894 | py | # $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
    def execute(self):
        # Start the correlator
        correlator = self.startTest()
        self.injectAnalytic(correlator)
        self.injectDataSimulator(correlator)
        self.ready(correlator)
        correlator.receive(filename='RawOutputData.evt', channels=['OutputData'])
        correlator.receive(filename='OutputDataOnly.evt', channels=['OUTPUT_DATA_ONLY'])
        correlator.injectMonitorscript(['test.mon'], self.input)
        # Run the simulator for just over 60 seconds so that we get 60 data points generated
        correlator.incrementTime(60.1)
        self.waitForSignal('OutputDataOnly.evt', expr='Received Data:', condition='>=59', timeout=5)
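        # '>=59' rather than '==60' tolerates an off-by-one at the simulated
        # time boundary: the exact count at 60.1s depends on when the
        # one-sample-per-second simulator emits its first point (an assumption
        # based on the comment above, not on the DataSimulator source).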
    def validate(self):
        # Ensure the test output was correct
        exprList = []
        exprList.append('Received Data: 100')
        exprList.append('Received Data: 96.66666666666667')
        exprList.append('Received Data: 93.33333333333333')
        exprList.append('Received Data: 90')
        exprList.append('Received Data: 86.66666666666667')
        exprList.append('Received Data: 83.33333333333334')
        exprList.append('Received Data: 80')
        exprList.append('Received Data: 76.66666666666667')
        exprList.append('Received Data: 73.33333333333334')
        exprList.append('Received Data: 70')
        exprList.append('Received Data: 66.66666666666667')
        exprList.append('Received Data: 63.33333333333334')
        exprList.append('Received Data: 60')
        exprList.append('Received Data: 56.66666666666667')
        exprList.append('Received Data: 53.33333333333334')
        exprList.append('Received Data: 50')
        exprList.append('Received Data: 46.66666666666667')
        exprList.append('Received Data: 43.33333333333334')
        exprList.append('Received Data: 40.00000000000001')
        exprList.append('Received Data: 36.66666666666667')
        exprList.append('Received Data: 33.33333333333334')
        exprList.append('Received Data: 30.00000000000001')
        exprList.append('Received Data: 26.66666666666667')
        exprList.append('Received Data: 23.33333333333334')
        exprList.append('Received Data: 20.00000000000001')
        exprList.append('Received Data: 16.66666666666668')
        exprList.append('Received Data: 13.33333333333334')
        exprList.append('Received Data: 10.00000000000001')
        exprList.append('Received Data: 6.66666666666668')
        exprList.append('Received Data: 3.33333333333334')
        exprList.append('Received Data: 100')
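        # The "sawfalling" wave drops from 100 in steps of 100/30 and wraps
        # back to 100 after 30 samples; the literals above could be generated
        # with (illustrative only -- floating-point accumulation in the
        # simulator explains artifacts like 40.00000000000001):
        #   expected = [100 - i * (100 / 30.0) for i in range(30)] + [100]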
        self.assertOrderedGrep("OutputDataOnly.evt", exprList=exprList)
        self.assertLineCount('OutputDataOnly.evt', expr='Received Data:', condition='>=59')
        # Check for invalid data values
        self.assertLineCount('OutputDataOnly.evt', expr='INVALID DATA RECEIVED!', condition='==0')
        # Ensure the correlator log output was correct
        exprList = []
        exprList.append('Validating com.industry.analytics.Analytic\("DataSimulator",\[\],\["OutputData"\],{"simulationType":"sawfalling"}\)')
        exprList.append('Analytic DataSimulator started for inputDataNames \[\]')
        self.assertOrderedGrep("correlator.out", exprList=exprList)
        # Make sure that we got the right number of analytics created
        self.assertLineCount('correlator.out', expr='Validating com.industry.analytics.Analytic', condition='==1')
        self.assertLineCount('correlator.out', expr='Analytic DataSimulator started', condition='==1')
        # Basic sanity checks
        self.checkSanity()
| [
"[email protected]"
] | |
6576c4f0d44e7715aa4ad36675fbe3c92075e2db | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02381/s751837364.py | bd7cb0f49528872efae67b22b1fb8ff06610b153 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # Standard Deviation
end = 0
while end == 0:
    studentAmount = int(input())
    if studentAmount == 0:
        end += 1
    else:
        scoreData = [int(i) for i in input().rstrip().split()]
        totalScore = 0
        for score in scoreData:
            totalScore += score
        averageScore = totalScore / studentAmount
        # print('Average: ' + str(averageScore))
        totalSquareDistance = 0
        for score in scoreData:
            totalSquareDistance += (score - averageScore) ** 2
        variance = totalSquareDistance / studentAmount
        # print('Variance: ' + str(variance))
        standardDeviation = variance ** 0.5
        # print('Standard Deviation: ', end='')
        print(str(standardDeviation))
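        # The loops above implement the population standard deviation,
        # sqrt(sum((x - mean)**2) / n). For reference (not part of the
        # original submission), the standard library yields the same value:
        #   from statistics import pstdev
        #   print(pstdev(scoreData))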
| [
"[email protected]"
] | |
78a893873241271aad40cdcd58e8cd782cbe62e3 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/hvy_chainer-gan-improvements/chainer-gan-improvements-master/sample.py | 8326637b1ad23d6efabde2e6c8394804eb7dfa71 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,109 | py | import argparse
import numpy as np
from chainer import serializers
from models import Generator
import plot
# Resize the MNIST dataset to 32x32 images for convenience
# since the generator will create images with dimensions
# of powers of 2 (doubling upsampling in each deconvolution).
im_shape = (32, 32)
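# For example, assuming the generator upsamples from a 4x4 seed (an
# assumption about models.Generator, which is not shown here), three
# stride-2 deconvolutions double the resolution each time: 4 -> 8 -> 16 -> 32.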
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-z', type=int, default=10)
    parser.add_argument('--n-samples', type=int, default=128)
    parser.add_argument('--in-generator-filename', type=str, default='generator.model')
    parser.add_argument('--out-filename', type=str, default='sample.png')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    n_z = args.n_z
    n_samples = args.n_samples
    in_generator_filename = args.in_generator_filename
    out_filename = args.out_filename
    # Rebuild the generator architecture and load the trained weights.
    generator = Generator(n_z, im_shape)
    serializers.load_hdf5(in_generator_filename, generator)
    # Draw latent vectors uniformly from [-1, 1) and decode them into images.
    zs = np.random.uniform(-1, 1, (n_samples, n_z)).astype(np.float32)
    x = generator(zs)
    plot.save_ims(out_filename, x.data)
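    # Usage sketch (flag names taken from parse_args above; generator.model
    # is assumed to exist from a prior training run):
    #   python sample.py --n-z 10 --n-samples 128 \
    #       --in-generator-filename generator.model --out-filename sample.png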
| [
"[email protected]"
] | |
ecfc3bac0bddf6b0310970297e5c76cc50d20103 | e33e414418be93aa0fb19c38b82b221ed8826460 | /intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/tests/test_base.py | 41e0c22a674faf17892e0df2b8aceaa3fafe3569 | [
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | wisererik/proposals | 69e4caaf89d7838c14b18328dc261b6c914748bf | 9db7413983df9341d1796f2acba7202d36f31278 | refs/heads/master | 2021-05-03T23:12:39.496346 | 2018-12-22T04:02:46 | 2018-12-22T04:02:46 | 120,399,059 | 0 | 0 | null | 2018-02-06T03:54:13 | 2018-02-06T03:54:13 | null | UTF-8 | Python | false | false | 1,582 | py | from vsmclient import base
from vsmclient import exceptions
from vsmclient.v1 import vsms
from tests import utils
from tests.v1 import fakes
cs = fakes.FakeClient()
class BaseTest(utils.TestCase):
    def test_resource_repr(self):
        r = base.Resource(None, dict(foo="bar", baz="spam"))
        self.assertEqual(repr(r), "<Resource baz=spam, foo=bar>")

    def test_getid(self):
        self.assertEqual(base.getid(4), 4)

        class TmpObject(object):
            id = 4
        self.assertEqual(base.getid(TmpObject), 4)

    def test_eq(self):
        # Two resources of the same type with the same id: equal
        r1 = base.Resource(None, {'id': 1, 'name': 'hi'})
        r2 = base.Resource(None, {'id': 1, 'name': 'hello'})
        self.assertEqual(r1, r2)

        # Two resources of different types: never equal
        r1 = base.Resource(None, {'id': 1})
        r2 = vsms.Volume(None, {'id': 1})
        self.assertNotEqual(r1, r2)

        # Two resources with no ID: equal if their info is equal
        r1 = base.Resource(None, {'name': 'joe', 'age': 12})
        r2 = base.Resource(None, {'name': 'joe', 'age': 12})
        self.assertEqual(r1, r2)

    def test_findall_invalid_attribute(self):
        # Make sure findall with an invalid attribute doesn't cause errors.
        # The following should not raise an exception.
        cs.vsms.findall(vegetable='carrot')

        # However, find() should raise an error
        self.assertRaises(exceptions.NotFound,
                          cs.vsms.find,
                          vegetable='carrot')
| [
"[email protected]"
] |