metadata | text
---|---
{
"source": "jimenbian/sublime-timer",
"score": 3
} |
#### File: sublime-timer/sublime-timer/sublime-timer.py
```python
import sublime, sublime_plugin
import threading
import time
i=0
class timer(threading.Thread): #The timer class is derived from the class threading.Thread
def __init__(self, num, interval):
threading.Thread.__init__(self)
self.thread_num = num
self.interval = interval
self.thread_stop = False
    def run(self):  # Override the run() method; put what you want the thread to do here
global i
while not self.thread_stop:
sublime.set_timeout(write_time,1)
i+=1
time.sleep(self.interval)
def pause(self):
self.thread_stop = True
def zero(self):
global i
i=0
thread1 = timer(1, 1)
class gtimerCommand(sublime_plugin.TextCommand):
def run(self, edit):
global thread1
        thread = timer(1, 1)
        # threading's isAlive() was removed in Python 3.9; is_alive() works on both old and new versions
        if not thread1.is_alive():
            thread.start()
            thread1 = thread
class gtimerpauseCommand(sublime_plugin.TextCommand):
def run(self, edit):
global thread1
thread1.pause()
class gtimerzeroCommand(sublime_plugin.TextCommand):
def run(self, edit):
global thread1
thread1.zero()
def write_time():
sublime.status_message(time_manage(i))
def time_manage(time_number):
time_str='time:'+str(int(time_number/60))+'min '+str(time_number%60)+'s'
return time_str
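# Usage sketch (not part of the original plugin): Sublime Text exposes the TextCommand subclasses
# above under command names derived from their class names ("gtimer", "gtimerpause", "gtimerzero").
# They could be triggered from the console or bound to keys roughly like this:
#
#   view.run_command("gtimer")       # start the timer; the elapsed time appears in the status bar
#   view.run_command("gtimerpause")  # stop the counting thread
#   view.run_command("gtimerzero")   # reset the counter to zero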
``` |
{
"source": "jimenezh/protocols.distributed_keygen",
"score": 3
} |
#### File: distributed_keygen/demos/distributed_keygen_example_local.py
```python
import asyncio
from typing import List
from tno.mpc.communication import Pool
from tno.mpc.protocols.distributed_keygen import DistributedPaillier
corruption_threshold = 0 # corruption threshold
key_length = 128 # bit length of private key
prime_thresh = 2000 # threshold for primality check
correct_param_biprime = 40 # correctness parameter for biprimality test
stat_sec_shamir = (
40 # statistical security parameter for secret sharing over the integers
)
PARTIES = 2 # number of parties that will be involved in the protocol, you can change this to any number you like
def setup_local_pool(server_port: int, ports: List[int]) -> Pool:
pool = Pool()
pool.add_http_server(server_port, "localhost")
for client_port in (port for port in ports if port != server_port):
pool.add_http_client(f"client{client_port}", "localhost", client_port)
return pool
local_ports = [3000 + i for i in range(PARTIES)]
local_pools = [
setup_local_pool(server_port, local_ports) for server_port in local_ports
]
loop = asyncio.get_event_loop()
async_coroutines = [
DistributedPaillier.from_security_parameter(
pool,
corruption_threshold,
key_length,
prime_thresh,
correct_param_biprime,
stat_sec_shamir,
distributed=False,
)
for pool in local_pools
]
print("Starting distributed key generation protocol.")
distributed_paillier_schemes = loop.run_until_complete(
asyncio.gather(*async_coroutines)
)
server = distributed_paillier_schemes[0]
analyst = distributed_paillier_schemes[1]
print("n = ",server.public_key.n, " g = ", server.public_key.g )
print("Server secret key is ", server.secret_key.t)
message = 12
ciphertext = server.encrypt(message)
pserver = server.secret_key.partial_decrypt(ciphertext)
panalyst = analyst.secret_key.partial_decrypt(ciphertext)
plaintext_attempt = server.secret_key.decrypt({0: pserver, 1:panalyst } )
# plaintext = server.decrypt(ciphertext)
print(f"Message: {message}\nCiphertext: {ciphertext}\nPartial decryptions: \n Server - {pserver} \n Analyst - {panalyst}")
print("The protocol has completed.")
```
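The example above recombines the two partial decryptions by hand. Since the schemes were created with `distributed=False` in a single Python instance, the built-in threshold `decrypt` coroutine can also be used, mirroring the library's tests; a minimal sketch, assuming it is appended to the script above:
```python
# Every party runs the decryption protocol over the pool, so the coroutines are gathered together.
decryptions = loop.run_until_complete(
    asyncio.gather(
        *[scheme.decrypt(ciphertext) for scheme in distributed_paillier_schemes]
    )
)
print("Joint decryptions:", decryptions)
```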
#### File: protocols/distributed_keygen/distributed_keygen.py
```python
from __future__ import annotations
import asyncio
import copy
import logging
import math
import secrets
from dataclasses import asdict
from random import randint
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast, overload
import sympy
from typing_extensions import TypedDict
from tno.mpc.communication import Serialization, SupportsSerialization
from tno.mpc.communication.httphandlers import HTTPClient
from tno.mpc.communication.pool import Pool
from tno.mpc.encryption_schemes.paillier import (
Paillier,
PaillierCiphertext,
PaillierPublicKey,
PaillierSecretKey,
paillier,
)
from tno.mpc.encryption_schemes.shamir import IntegerShares
from tno.mpc.encryption_schemes.shamir import (
ShamirSecretSharingIntegers as IntegerShamir,
)
from tno.mpc.encryption_schemes.shamir import ShamirSecretSharingScheme as Shamir
from tno.mpc.encryption_schemes.shamir import ShamirShares, Shares
from tno.mpc.encryption_schemes.templates.encryption_scheme import EncodedPlaintext
from tno.mpc.encryption_schemes.utils import pow_mod
from .paillier_shared_key import PaillierSharedKey
class DistributedPaillier(Paillier, SupportsSerialization):
"""
Class that acts as one of the parties involved in distributed Paillier secret key generation.
The pool represents the network of parties involved in the key generation protocol.
"""
default_key_length = 2048
default_prime_threshold = 2000
default_biprime_param = 40
default_sec_shamir = 40
default_corruption_threshold = 1
_global_instances: Dict[int, Dict[int, "DistributedPaillier"]] = {}
_local_instances: Dict[int, "DistributedPaillier"] = {}
@classmethod
async def from_security_parameter( # type: ignore[override]
cls,
pool: Pool,
corruption_threshold: int = default_corruption_threshold,
key_length: int = default_key_length,
prime_threshold: int = default_prime_threshold,
correct_param_biprime: int = default_biprime_param,
stat_sec_shamir: int = default_sec_shamir,
distributed: bool = True,
precision: int = 0,
) -> DistributedPaillier:
r"""
Function that takes security parameters related to secret sharing and Paillier and
initiates a protocol to create a shared secret key between the parties in the provided
pool.
:param pool: The network of involved parties
:param corruption_threshold: Maximum number of allowed corruptions. We require for the
number of parties in the pool and the corruption threshold that
            $$\text{number_of_parties} \geq 2 \cdot \text{corruption_threshold} + 1$$.
This is because we need to multiply secret sharings that both use polynomials of
degree corruption_threshold. The resulting secret sharing then becomes a polynomial
            of degree $2\cdot\text{corruption_threshold}$ and it requires at least $2\cdot\text{corruption_threshold}+1$
evaluation points to reconstruct the secret in that sharing.
:param key_length: desired bit length of the modulus $N$
:param prime_threshold: Upper bound on the number of prime numbers to check during
primality tests
:param correct_param_biprime: parameter that affects the certainty of the generated $N$
to be the product of two primes
:param stat_sec_shamir: security parameter for the Shamir secret sharing over the integers
:param distributed: Whether the different parties are run on different python instances
:param precision: precision (number of decimals) to ensure
:raise ValueError: In case the number of parties $n$ and the corruption threshold $t$ do
not satisfy that $n \geq 2*t + 1$
:raise Exception: In case the parties agree on a session id that is already being used.
:return: DistributedPaillier scheme containing a regular Paillier public key and a shared
secret key.
"""
(
number_of_players,
prime_length,
prime_list,
shamir_scheme,
shares,
other_parties,
) = cls.setup_input(pool, key_length, prime_threshold, corruption_threshold)
index, party_indices, zero_share, session_id = await cls.setup_protocol(
shamir_scheme, other_parties, pool
)
# check if number_of_parties >= 2 * corruption_threshold + 1
if number_of_players < 2 * corruption_threshold + 1:
raise ValueError(
"For a secret sharing scheme that needs to do a homomorphic "
f"multiplication, \nwhich is the case during distributed key generation "
f"with Paillier,\nwe require for the number of parties n and the corruption "
f"threshold t that n >= 2*t + 1.\n"
f"The given pool contains {number_of_players} parties (n) and the given corruption "
f"threshold (t) is {corruption_threshold}."
)
# generate keypair
public_key, secret_key = await cls.generate_keypair(
stat_sec_shamir,
number_of_players,
corruption_threshold,
shares,
index,
zero_share,
pool,
prime_list,
prime_length,
party_indices,
correct_param_biprime,
shamir_scheme,
)
scheme = cls(
public_key=public_key,
secret_key=secret_key,
precision=precision,
pool=pool,
index=index,
party_indices=party_indices,
shares=shares,
session_id=session_id,
distributed=distributed,
)
# We need to distinguish the case where the parties share a python instance and where they
# are run in different python instances. If the same python instance is used, then we need
# to save a different DistributedPaillier instance for each party. If different python
# instances are used, then we have exactly one DistributedPaillier instance in the python
# instance for that session.
if distributed:
if session_id in cls._local_instances:
                raise Exception(
                    "An already existing session ID is about to be overwritten. "
                    "This can only happen if multiple sessions are run within the same python "
                    "instance and one of those sessions has the same ID"
                )
cls._local_instances[session_id] = scheme
else:
if index in cls._global_instances:
if session_id in cls._global_instances[index]:
                    raise Exception(
                        "An already existing session ID is about to be overwritten. "
                        "This can only happen if multiple sessions are run within the same python "
                        "instance and one of those sessions has the same ID"
                    )
cls._global_instances[index][session_id] = scheme
else:
cls._global_instances[index] = {session_id: scheme}
return scheme
def __init__(
self,
public_key: PaillierPublicKey,
secret_key: PaillierSharedKey,
precision: int,
pool: Pool,
index: int,
party_indices: Dict[str, int],
shares: Shares,
session_id: int,
distributed: bool,
**kwargs: Any,
) -> None:
"""
Initializes a DistributedPaillier instance with a public Paillier key and a shared
secret Paillier key.
:param public_key: The Paillier public key
:param secret_key: The shared secret Paillier key
:param precision: The precision of the resulting scheme
:param pool: The pool with connections of parties involved in the shared secret key
:param index: The index of the party who owns this instance within the pool
:param party_indices: Dictionary mapping parties in the pool to their indices
:param shares: Data class that stores and keeps track of shares during decryption
:param session_id: The unique session identifier belonging to the protocol that generated
the keys for this DistributedPaillier scheme.
:param distributed: Boolean value indicating whether the protocol that generated the keys
for this DistributedPaillier scheme was run in different Python instances (True) or in a
single python instance (False)
:param kwargs: Any keyword arguments that are passed to the super __init__ function
"""
super().__init__(
public_key, cast(PaillierSecretKey, secret_key), precision, False, **kwargs
)
# these variables are necessary during decryption
self.pool = pool
self.index = index
self.shares = shares
self.party_indices = party_indices
self.session_id = session_id
self.distributed = distributed
def __eq__(self, other: object) -> bool:
"""
        Compare this Distributed Paillier scheme with another object to determine (in)equality.
        The secret key is not taken into account, as it might not be known; instead the public key,
        the precision and the session id are compared.
:param other: Object to compare this Paillier scheme with.
:return: Boolean value representing (in)equality of both objects.
"""
# Equality should still hold if the secret key is not available
return (
isinstance(other, DistributedPaillier)
and self.precision == other.precision
and self.public_key == other.public_key
and self.session_id == other.session_id
)
# region Decryption
async def decrypt( # type: ignore[override]
self,
ciphertext: PaillierCiphertext,
apply_encoding: bool = True,
receivers: Optional[List[str]] = None,
) -> Optional[paillier.Plaintext]:
"""
Decrypts the input ciphertext. Starts a protocol between the parties involved to create
local decryptions, send them to the other parties and combine them into full decryptions
for each party.
:param ciphertext: Ciphertext to be decrypted.
:param apply_encoding: Boolean indicating whether the decrypted ciphertext is decoded
before it is returned. Defaults to True.
:param receivers: An optional list specifying the names of the receivers, your own 'name'
is "self".
:return: Plaintext decrypted value.
"""
decrypted_ciphertext = await self._decrypt_raw(ciphertext, receivers)
if decrypted_ciphertext is not None:
return (
self.decode(decrypted_ciphertext)
if apply_encoding
else decrypted_ciphertext.value
)
return None
async def _decrypt_raw( # type: ignore[override]
self, ciphertext: PaillierCiphertext, receivers: Optional[List[str]] = None
) -> Optional[EncodedPlaintext[int]]:
"""
Function that starts a protocol between the parties involved to create local decryptions,
send them to the other parties and combine them into full decryptions for each party.
:param ciphertext: The ciphertext to be decrypted.
:param receivers: An optional list specifying the names of the receivers, your own 'name'
is "self".
:return: The encoded plaintext corresponding to the ciphertext.
"""
receivers_without_self: Optional[List[str]]
if receivers is not None:
# If we are part of the receivers, we expect the other parties to send us partial
# decryptions
self_receive = "self" in receivers
# We will broadcast our partial decryption to all receivers, but we do not need to send
# anything to ourselves.
if self_receive:
receivers_without_self = [recv for recv in receivers if recv != "self"]
else:
receivers_without_self = receivers
else:
# If no receivers are specified, we assume everyone will receive the partial decryptions
self_receive = True
receivers_without_self = receivers
# generate the local partial decryption
self.shares.partial_decryption.shares[self.index] = cast(
PaillierSharedKey, self.secret_key
).partial_decrypt(ciphertext)
# send the partial decryption to all other parties in the provided network
self.broadcast(
{
"content": "partial_decryption",
"value": self.shares.partial_decryption.shares[self.index],
},
self.pool,
receivers=receivers_without_self,
)
if self_receive:
# receive the partial decryption from the other parties
await self.gather_shares(
"partial_decryption", self.pool, self.shares, self.party_indices
)
# combine all partial decryption to obtain the full decryption
decryption = cast(PaillierSharedKey, self.secret_key).decrypt(
self.shares.partial_decryption.shares
)
return EncodedPlaintext(decryption, scheme=self)
return None
# endregion
# region Communication
@classmethod
def asend(
cls, pool: Pool, handler_name: str, message: Any, msg_id: Optional[str] = None
) -> None:
"""
Function that sends a message to a certain party in the pool
:param pool: network of involved parties
:param handler_name: receiver
:param message: python object to be sent
:param msg_id: optional
"""
pool.asend(handler_name, message, msg_id)
@classmethod
async def recv(
cls, pool: Pool, handler_name: str, msg_id: Optional[str] = None
) -> Any:
"""
Function that receives a message from a certain party in the pool
:param pool: network for involved parties
:param handler_name: name of the party that sent the message
:param msg_id: optional message id of the expected message
:return: python object
"""
return await pool.recv(handler_name, msg_id)
@classmethod
def broadcast(
cls,
message: Any,
pool: Pool,
message_id: Optional[str] = None,
receivers: Optional[List[str]] = None,
) -> None:
"""
Function that sends a message to all other parties in the pool
:param message: python object
:param pool: network of involved parties
:param message_id: optional message ID
:param receivers: optional list of receivers
"""
if receivers is not None:
other_parties: Iterable[str] = receivers
else:
other_parties = pool.pool_handlers.keys()
for party in other_parties:
pool.asend(party, message, message_id)
@classmethod
async def recv_all(cls, pool: Pool) -> Tuple[Tuple[str, Any]]:
"""
Function that retrieves one message for each party
:param pool: network of involved parties
:return: list of tuples containing the party and their message
"""
other_parties = pool.pool_handlers.keys()
async def result_tuple(party: str) -> Tuple[str, Any]:
"""
Get the Tuple containing party and message for the given party.
:param party: Party for which a message should be received.
:return: Tuple with first the party and second the message that was received from
that party.
"""
msg = await cls.recv(pool, party)
return party, msg
return await asyncio.gather(*[result_tuple(party) for party in other_parties]) # type: ignore
# endregion
# region Setup functions
@classmethod
def setup_input(
cls,
pool: Pool,
key_length: int,
prime_threshold: int,
corruption_threshold: int,
) -> Tuple[int, int, List[int], Shamir, Shares, List[str]]:
r"""
Function that sets initial variables for the process of creating a shared secret key
:param pool: network of involved parties
:param key_length: desired bit length of the modulus $N = p \cdot q$
:param prime_threshold: Bound on the number of prime numbers to be checked for primality
tests
:param corruption_threshold: Number of parties that are allowed to be corrupted
:return: A tuple of initiated variables, containing first the number_of_players,
second the length of the primes $p$ and $q$, third a list of small primes for the
small_prime test (empty if the length of $p$ and $q$ is smaller than the
prime_threshold), fourth a regular Shamir Sharing scheme, fifth a Shares data structure
for holding relevant shares, and last a list of the names of other parties.
"""
number_of_players = len(pool.pool_handlers) + 1
# key length of primes p and q
prime_length = key_length // 2
# if the primes are smaller than the small prime threshold,
# there's no point in doing a small prime test
if prime_length < math.log(prime_threshold):
prime_threshold = 1
prime_list = list(sympy.primerange(3, prime_threshold + 1))
shamir_scheme = cls.__init_shamir_scheme(
prime_length, number_of_players, corruption_threshold
)
shares = Shares()
other_parties = list(pool.pool_handlers.keys())
return (
number_of_players,
prime_length,
prime_list,
shamir_scheme,
shares,
other_parties,
)
@classmethod
async def setup_protocol(
cls, shamir_scheme: Shamir, other_parties: List[str], pool: Pool
) -> Tuple[int, Dict[str, int], ShamirShares, int]:
"""
Function that initiates a protocol to determine IDs and sets own ID
Additionally, the protocol prepares a secret sharing of 0 under a 2t-out-of-n
threshold scheme to be used later on.
:param shamir_scheme: Shamir secret sharing scheme to be used for p and q
:param other_parties: Names of the other parties in the pool
:param pool: network of involved parties
        :return: This party's index, a dictionary with indices for the other parties, a
            zero-sharing in a 2t-out-of-n threshold scheme to be used later on, and the session id
"""
# start indices protocol
party_indices, session_id = await cls.get_indices(pool)
# prepare zero sharing
zero_sharing_scheme = Shamir(
shamir_scheme.modulus,
shamir_scheme.number_of_parties,
shamir_scheme.polynomial_degree * 2,
)
zero_sharing = zero_sharing_scheme.share_secret(0)
index = party_indices["self"]
# send zero shares to other parties
for party in other_parties:
party_share = zero_sharing.shares[party_indices[party]]
cls.asend(pool, party, {"content": "zero", "value": party_share})
# receive all zero shares of others
responses = await cls.recv_all(pool)
assert all(d["content"] == "zero" for _, d in responses)
shares = [d["value"] for _, d in responses]
# local share of the final zero sharing
final_zero_share = zero_sharing.shares[index] + sum(shares)
zero_share = ShamirShares(zero_sharing_scheme, {index: final_zero_share})
return index, party_indices, zero_share, session_id
@classmethod
async def get_indices(cls, pool: Pool) -> Tuple[Dict[str, int], int]:
"""
Function that initiates a protocol to determine IDs (indices) for each party
:param pool: network of involved parties
        :return: dictionary from party name to index, where the entry "self" contains this party's
            index, and the agreed session id
"""
success = False
list_to_sort = []
while not success:
success = True
# generate random number
random_number_self = randint(0, 1000000)
# send random number to all other parties
cls.broadcast(random_number_self, pool)
# receive random numbers from the other parties
responses = await cls.recv_all(pool)
list_to_sort = [("self", random_number_self)]
for party, random_number_party in responses:
if random_number_party not in [rn for _, rn in list_to_sort]:
list_to_sort.append((party, random_number_party))
else:
success = False
# sort the list based on the random numbers
sorted_list = sorted(list_to_sort, key=lambda j: j[1])
party_indices = {}
# extract the party names from the sorted list and assign an index based on the position.
# this dictionary should be the same for each party
for index, party in enumerate([party_name for party_name, _ in sorted_list]):
party_indices[party] = index + 1
session_id = sum(i[1] for i in sorted_list) % 1000000
return party_indices, session_id
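    # Illustrative example (values made up for this comment): if "self" drew 120 and the responses
    # were ("alice", 42) and ("bob", 730), the sorted list is [("alice", 42), ("self", 120),
    # ("bob", 730)], so alice gets index 1, this party index 2 and bob index 3, while every party
    # computes the same session id (42 + 120 + 730) % 1000000.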
@classmethod
def __init_shamir_scheme(
cls, prime_length: int, number_of_players: int, corruption_threshold: int
) -> Shamir:
"""
Function to initialize the regular Shamir scheme
:param prime_length: bit length of the shamir prime
:param number_of_players: number of parties involved in total (n)
:param corruption_threshold: number of parties allowed to be corrupted
:return: Shamir secret sharing scheme
"""
shamir_length = 2 * (prime_length + math.ceil((math.log2(number_of_players))))
shamir_scheme = Shamir(
sympy.nextprime(2 ** shamir_length),
number_of_players,
corruption_threshold,
)
return shamir_scheme
@classmethod
async def generate_keypair(
cls,
stat_sec_shamir: int,
number_of_players: int,
corruption_threshold: int,
shares: Shares,
index: int,
zero_share: ShamirShares,
pool: Pool,
prime_list: List[int],
prime_length: int,
party_indices: Dict[str, int],
correct_param_biprime: int,
shamir_scheme: Shamir,
) -> Tuple[PaillierPublicKey, PaillierSharedKey]:
"""
Function to distributively generate a shared secret key and a corresponding public key
:param stat_sec_shamir: security parameter for Shamir secret sharing over the integers
:param number_of_players: number of parties involved in the protocol
:param corruption_threshold: number of parties that are allowed to be corrupted
:param shares: dictionary that keeps track of shares for parties for certain numbers
:param index: index of this party
:param zero_share: A secret sharing of $0$ in a $2t$-out-of-$n$ shamir secret sharing scheme
:param pool: network of involved parties
:param prime_list: list of prime numbers
:param prime_length: desired bit length of $p$ and $q$
:param party_indices: mapping from party names to indices
:param correct_param_biprime: correctness parameter that affects the certainty that the
generated $N$ is a product of two primes
:param shamir_scheme: $t$-out-of-$n$ Shamir secret sharing scheme
:return: regular Paillier public key and a shared secret key
"""
secret_key = await cls.generate_secret_key(
stat_sec_shamir,
number_of_players,
corruption_threshold,
shares,
index,
zero_share,
pool,
prime_list,
prime_length,
party_indices,
correct_param_biprime,
shamir_scheme,
)
modulus = secret_key.n
public_key = PaillierPublicKey(modulus, modulus + 1)
logging.info("Key generation complete")
return public_key, secret_key
@classmethod
async def generate_pq(
cls,
shares: Shares,
pool: Pool,
index: int,
prime_length: int,
party_indices: Dict[str, int],
shamir_scheme: Shamir,
    ) -> Tuple[ShamirShares, ShamirShares]:
        """
Function to generate primes $p$ and $q$
:param shares: dictionary that keeps track of shares for parties for certain numbers
:param pool: network of involved parties
:param index: index of this party
:param prime_length: desired bit length of $p$ and $q$
:param party_indices: mapping from party names to indices
:param shamir_scheme: $t$-out-of-$n$ Shamir secret sharing scheme
:return: sharings of $p$ and $q$
"""
shares.p.additive = cls.generate_prime_additive_share(index, prime_length)
cls.shamir_share_and_send(
"p", shares, shamir_scheme, index, pool, party_indices
)
await cls.gather_shares("p", pool, shares, party_indices)
p_sharing = cls.__add_received_shamir_shares("p", shares, index, shamir_scheme)
shares.q.additive = cls.generate_prime_additive_share(index, prime_length)
cls.shamir_share_and_send(
"q", shares, shamir_scheme, index, pool, party_indices
)
await cls.gather_shares("q", pool, shares, party_indices)
q_sharing = cls.__add_received_shamir_shares("q", shares, index, shamir_scheme)
return p_sharing, q_sharing
@classmethod
def generate_prime_additive_share(cls, index: int, prime_length: int) -> int:
r"""
        Generate a random value between $2^{\text{length}-1}$ and $2^{\text{length}}$.
        The function will ensure that the random
        value is equal to $3 \mod 4$ for the first player, and to $0 \mod 4$ for all
        other players.
This is necessary to generate additive shares of $p$ and $q$, or the
bi-primality test will not work.
:param index: index of this party
:param prime_length: desired bit length of primes $p$ and $q$
:return: a random integer of the desired bit length and value modulo $4$
"""
if index == 1:
mod4 = 3
else:
mod4 = 0
random_number = secrets.randbits(prime_length - 3) << 2
additive_share: int = 2 ** (prime_length - 1) + random_number + mod4
return additive_share
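    # Illustrative example (toy prime_length of 8, far below a realistic key size): randbits(5) << 2
    # yields a multiple of 4 in [0, 124], so player 1 returns 2**7 + random_number + 3, which is
    # 3 mod 4, while every other player returns 2**7 + random_number, which is 0 mod 4.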
@classmethod
def shamir_share_and_send(
cls,
content: str,
shares: Shares,
shamir_scheme: Shamir,
index: int,
pool: Pool,
party_indices: Dict[str, int],
) -> None:
"""
Create a secret-sharing of the input value, and send each share to
the corresponding player, together with the label content
:param content: string identifying the number to be shared and sent
:param shares: dictionary keeping track of shares for different parties and numbers
:param shamir_scheme: $t$-out-of-$n$ Shamir secret sharing scheme
:param index: index of this party
:param pool: network of involved parties
:param party_indices: mapping from party names to indices
:raise NotImplementedError: In case the given content is not "p" or "q".
"""
# retrieve the local additive share for content
value = asdict(shares)[content]["additive"]
# create a shamir sharing of this value
value_sharing = shamir_scheme.share_secret(value)
# Save this player's shamir share of the local additive share
if content == "p":
shares.p.shares[index] = value_sharing.shares[index]
elif content == "q":
shares.q.shares[index] = value_sharing.shares[index]
else:
raise NotImplementedError(
f"Don't know what to do with this content: {content}"
)
# Send the other players' shares of the local additive share
other_parties = pool.pool_handlers.keys()
for party in other_parties:
party_share = value_sharing.shares[party_indices[party]]
cls.asend(pool, party, {"content": content, "value": party_share})
@classmethod
def int_shamir_share_and_send(
cls,
content: str,
shares: Shares,
int_shamir_scheme: IntegerShamir,
index: int,
pool: Pool,
party_indices: Dict[str, int],
) -> None:
r"""
Create a secret-sharing of the input value, and send each share to
the corresponding player, together with the label content
:param content: string identifying the number to be shared and sent
:param shares: dictionary keeping track of shares for different parties and numbers
:param int_shamir_scheme: Shamir secret sharing scheme over the integers
:param index: index of this party
:param pool: network of involved parties
:param party_indices: mapping from party names to indices
:raise NotImplementedError: In case the given content is not "lambda\_" or "beta".
"""
# retrieve the local additive share for content
value = asdict(shares)[content]["additive"]
# create a shamir sharing of this value
value_sharing = int_shamir_scheme.share_secret(value)
# Save this player's shamir share of the local additive share
if content == "lambda_":
shares.lambda_.shares[index] = value_sharing.shares[index]
elif content == "beta":
shares.beta.shares[index] = value_sharing.shares[index]
else:
raise NotImplementedError(
f"Don't know what to do with this content: {content}"
)
# Send the other players' shares of the local additive share
other_parties = pool.pool_handlers.keys()
for party in other_parties:
party_share = value_sharing.shares[party_indices[party]]
cls.asend(pool, party, {"content": content, "value": party_share})
@classmethod
def __add_received_shamir_shares(
cls, content: str, shares: Shares, index: int, shamir_scheme: Shamir
) -> ShamirShares:
"""
Fetch shares labeled with content and add them to
own_share_value.
:param content: string identifying the number to be retrieved
:param shares: dictionary keeping track of shares for different parties and numbers
:param index: index of this party
:param shamir_scheme: $t$-out-of-$n$ Shamir secret sharing
:return: sum of all the shares for the number identified by content
"""
shamir_shares = [
ShamirShares(shamir_scheme, {index: v})
for k, v in asdict(shares)[content]["shares"].items()
]
for i in range(1, len(shamir_shares)):
shamir_shares[0] += shamir_shares[i]
return shamir_shares[0]
@classmethod
def __int_add_received_shares(
cls,
content: str,
int_shamir_scheme: IntegerShamir,
shares: Shares,
index: int,
corruption_threshold: int,
) -> IntegerShares:
"""
Fetch shares labeled with content and add them to own_share_value.
:param content: string identifying the number to be retrieved
:param int_shamir_scheme: Shamir secret sharing scheme over the integers
:param shares: dictionary keeping track of shares for different parties and numbers
:param index: index of this party
:param corruption_threshold: number of parties that are allowed to be corrupted
:return: sum of the integer sharing of the number identified by content
"""
integer_shares = [
IntegerShares(
int_shamir_scheme,
{index: v},
corruption_threshold,
scaling=math.factorial(int_shamir_scheme.number_of_parties),
)
for k, v in asdict(shares)[content]["shares"].items()
]
for i in range(1, len(integer_shares)):
integer_shares[0] += integer_shares[i]
return integer_shares[0]
@classmethod
    def __mul_received_v_and_check(cls, shares: Shares, modulus: int) -> bool:
        """
Function to test whether a certain primality check holds
:param shares: dictionary keeping track of shares for a certain value
:param modulus: value of $N$
:return: true if the biprimality tests succeeds and false if it fails
"""
product = 1
for key, value in shares.v.shares.items():
if key != 1:
product *= value
value1 = shares.v.shares[1]
# The below test determines if N is "probably" the product of two primes (if the
# statement is True). Otherwise, N is definitely not the product of two primes.
return ((value1 % modulus) == (product % modulus)) or (
(value1 % modulus) == (-product % modulus)
)
@classmethod
async def gather_shares(
cls,
content: str,
pool: Pool,
shares: Shares,
party_indices: Dict[str, int],
) -> None:
r"""
Gather all shares with label content
:param content: string identifying a number
:param pool: network of involved parties
:param shares: dictionary keeping track of shares of different parties for certain numbers
:param party_indices: mapping from party names to indices
:raise NotImplementedError: In case the given content is not any of the possible values
            for which we store shares ("p", "q", "n", "biprime", "v", "lambda\_", "beta",
            "secret_key", "partial_decryption").
"""
shares_from_other_parties = await cls.recv_all(pool)
for party, message in shares_from_other_parties:
msg_content = message["content"]
err_msg = f"received a share for {msg_content}, but expected {content}"
assert msg_content == content, err_msg
if content == "p":
shares.p.shares[party_indices[party]] = message["value"]
elif content == "q":
shares.q.shares[party_indices[party]] = message["value"]
elif content == "n":
shares.n.shares[party_indices[party]] = message["value"]
elif content == "biprime":
shares.biprime.shares[party_indices[party]] = message["value"]
elif content == "v":
shares.v.shares[party_indices[party]] = message["value"]
elif content == "lambda_":
shares.lambda_.shares[party_indices[party]] = message["value"]
elif content == "beta":
shares.beta.shares[party_indices[party]] = message["value"]
elif content == "secret_key":
shares.secret_key.shares[party_indices[party]] = message["value"]
elif content == "partial_decryption":
shares.partial_decryption.shares[party_indices[party]] = message[
"value"
]
else:
raise NotImplementedError(
f"Don't know what to do with this content: {content}"
)
@classmethod
async def __biprime_test(
cls,
correct_param_biprime: int,
shares: Shares,
modulus: int,
pool: Pool,
index: int,
party_indices: Dict[str, int],
) -> bool:
"""
Function to test for biprimality of $N$
:param correct_param_biprime: correctness parameter that affects the certainty that the
generated modulus is biprime
:param shares: dictionary keeping track of shares for different parties for certain numbers
:param modulus: the modulus $N$
:param pool: network of involved parties
:param index: index of this party
:param party_indices: mapping from party name to indices
:return: true if the test succeeds and false if it fails
"""
counter = 0
while counter < correct_param_biprime:
test_value = secrets.randbelow(modulus)
cls.broadcast({"content": "biprime", "value": test_value}, pool)
shares.biprime.shares[index] = test_value
await cls.gather_shares("biprime", pool, shares, party_indices)
test_value = 0
for value in shares.biprime.shares.values():
test_value += value
test_value = test_value % modulus
if sympy.jacobi_symbol(test_value, modulus) == 1:
if index == 1:
v_value = int(
pow_mod(
test_value,
(modulus - shares.p.additive - shares.q.additive + 1) // 4,
modulus,
)
)
else:
v_value = int(
pow_mod(
test_value,
(shares.p.additive + shares.q.additive) // 4,
modulus,
)
)
shares.v.shares[index] = v_value
cls.broadcast({"content": "v", "value": v_value}, pool)
await cls.gather_shares("v", pool, shares, party_indices)
if cls.__mul_received_v_and_check(shares, modulus):
counter += 1
else:
return False
return True
@classmethod
def __generate_lambda_addit_share(
cls,
index: int,
modulus: int,
shares: Shares,
    ) -> int:
        """
Function to generate an additive share of lambda
:param index: index of this party
:param modulus: modulus $N$
:param shares: dictionary keeping track of shares for different parties for certain numbers
:return: additive share of lambda
"""
if index == 1:
return modulus - shares.p.additive - shares.q.additive + 1
# else
return 0 - shares.p.additive - shares.q.additive
@classmethod
def __small_prime_divisors_test(cls, prime_list: List[int], modulus: int) -> bool:
"""
Function to test $N$ for small prime divisors
:param prime_list: list of prime numbers
:param modulus: modulus $N$
:return: true if $N$ has small divisors and false otherwise
"""
for prime in prime_list:
if modulus % prime == 0:
return True
return False
@classmethod
async def compute_modulus(
cls,
shares: Shares,
zero_share: ShamirShares,
index: int,
pool: Pool,
prime_list: List[int],
party_indices: Dict[str, int],
prime_length: int,
shamir_scheme: Shamir,
correct_param_biprime: int,
) -> int:
r"""
        Function that starts a protocol to generate candidates for $p$ and $q$.
        The product of the two is then checked for biprimality to ensure it is a valid
        modulus. This is repeated until it succeeds.
:param shares: dictionary that keeps track of shares for parties for certain numbers
:param zero_share: A secret sharing of $0$ in a $2t$-out-of-$n$ shamir secret sharing scheme
:param index: index of this party
:param pool: network of involved parties
:param prime_list: list of prime numbers
:param party_indices: mapping from party names to indices
:param prime_length: desired bit length of $p$ and $q$
:param shamir_scheme: $t$-out-of-$n$ Shamir secret sharing scheme
:param correct_param_biprime: correctness parameter that affects the certainty that the
generated $N$ is a product of two primes
:return: modulus $N$
"""
sp_err_counter = 0
bip_err_counter = 0
bip = False
logging.info("Computing N")
modulus = 0
counter = 0
while not bip:
counter += 1
shares.biprime = Shares.Biprime()
shares.v = Shares.V()
            # secret sharings of p and q
p_sharing, q_sharing = await cls.generate_pq(
shares, pool, index, prime_length, party_indices, shamir_scheme
)
# secret sharing of the modulus
modulus_sharing = p_sharing * q_sharing
# Add 0-share to fix distribution
modulus_sharing += zero_share
shares.n.shares[index] = modulus_sharing.shares[index]
cls.broadcast(
{"content": "n", "value": modulus_sharing.shares[index]}, pool
)
await cls.gather_shares("n", pool, shares, party_indices)
modulus_sharing.shares = shares.n.shares
modulus = modulus_sharing.reconstruct_secret()
if not cls.__small_prime_divisors_test(prime_list, modulus):
bip = await cls.__biprime_test(
correct_param_biprime, shares, modulus, pool, index, party_indices
)
if not bip:
bip_err_counter += 1
else:
sp_err_counter += 1
logging.info(f"N = {modulus}")
logging.info(f"Failures counter: sp={sp_err_counter} biprime={bip_err_counter}")
return modulus
@classmethod
async def generate_secret_key(
cls,
stat_sec_shamir: int,
number_of_players: int,
corruption_threshold: int,
shares: Shares,
index: int,
zero_share: ShamirShares,
pool: Pool,
prime_list: List[int],
prime_length: int,
party_indices: Dict[str, int],
correct_param_biprime: int,
shamir_scheme: Shamir,
) -> PaillierSharedKey:
"""
        Function that generates the modulus and sets up the sharing of the private key
:param stat_sec_shamir: security parameter for the Shamir secret sharing over the integers
:param number_of_players: total number of participants in this session (including self)
:param corruption_threshold: Maximum number of allowed corruptions
:param shares: dictionary that keeps track of shares for parties for certain numbers
:param index: index of this party
:param zero_share: A secret sharing of $0$ in a $2t$-out-of-$n$ shamir secret sharing scheme
:param pool: network of involved parties
:param prime_list: list of prime numbers
:param prime_length: desired bit length of $p$ and $q$
:param party_indices: mapping from party names to indices
:param correct_param_biprime: correctness parameter that affects the certainty that the
generated $N$ is a product of two primes
:param shamir_scheme: $t$-out-of-$n$ Shamir secret sharing scheme
:return: shared secret key
"""
modulus = await cls.compute_modulus(
shares,
zero_share,
index,
pool,
prime_list,
party_indices,
prime_length,
shamir_scheme,
correct_param_biprime,
)
int_shamir_scheme = IntegerShamir(
stat_sec_shamir,
modulus,
number_of_players,
corruption_threshold,
)
shares.lambda_.additive = cls.__generate_lambda_addit_share(
index, modulus, shares
)
cls.int_shamir_share_and_send(
"lambda_", shares, int_shamir_scheme, index, pool, party_indices
)
await cls.gather_shares("lambda_", pool, shares, party_indices)
lambda_ = cls.__int_add_received_shares(
"lambda_", int_shamir_scheme, shares, index, corruption_threshold
)
theta = 0
secret_key_sharing: IntegerShares
while True:
shares.secret_key = Shares.SecretKey()
shares.beta = Shares.Beta()
shares.beta.additive = secrets.randbelow(modulus)
cls.int_shamir_share_and_send(
"beta", shares, int_shamir_scheme, index, pool, party_indices
)
await cls.gather_shares("beta", pool, shares, party_indices)
beta = cls.__int_add_received_shares(
"beta", int_shamir_scheme, shares, index, corruption_threshold
)
secret_key_sharing = lambda_ * beta
temp_secret_key = copy.deepcopy(secret_key_sharing)
temp_secret_key.shares = {
key: (value % modulus) for key, value in temp_secret_key.shares.items()
}
shares.secret_key.shares = temp_secret_key.shares
cls.broadcast(
{"content": "secret_key", "value": temp_secret_key.shares[index]}, pool
)
await cls.gather_shares("secret_key", pool, shares, party_indices)
reconstructed_secret_key = temp_secret_key.reconstruct_secret(
modulus=modulus
)
theta = (
reconstructed_secret_key
* math.factorial(int_shamir_scheme.number_of_parties) ** 3
) % modulus
if math.gcd(theta, modulus) != 0:
break
secret_key = PaillierSharedKey(
n=modulus,
t=corruption_threshold,
player_id=index,
theta=theta,
share=secret_key_sharing,
)
return secret_key
class SerializedDistributedPaillier(Paillier.SerializedPaillier, TypedDict):
session_id: int
distributed: bool
index: int
def serialize(
self, **_kwargs: Any
) -> DistributedPaillier.SerializedDistributedPaillier:
r"""
Serialization function for Distributed Paillier schemes, which will be passed to
the communication module
:param \**_kwargs: optional extra keyword arguments
:return: Dictionary containing the serialization of this DistributedPaillier scheme.
"""
return {
"session_id": self.session_id,
"distributed": self.distributed,
"index": self.index,
"prec": self.precision,
"pubkey": self.public_key,
}
@overload
@staticmethod
def deserialize(
obj: DistributedPaillier.SerializedDistributedPaillier,
*,
origin: Optional[HTTPClient] = ...,
**kwargs: Any,
) -> "DistributedPaillier":
...
@overload
@staticmethod
def deserialize(
obj: Paillier.SerializedPaillier,
*,
origin: Optional[HTTPClient] = ...,
**kwargs: Any,
) -> "Paillier":
...
@staticmethod
def deserialize(
obj: Union[
DistributedPaillier.SerializedDistributedPaillier,
Paillier.SerializedPaillier,
],
*,
origin: Optional[HTTPClient] = None,
**kwargs: Any,
) -> Union["DistributedPaillier", "Paillier"]:
r"""
Deserialization function for Distributed Paillier schemes, which will be passed to
the communication module
:param obj: serialization of a distributed paillier scheme.
:param origin: HTTPClient representing where the message came from if applicable
:param \**kwargs: optional extra keyword arguments
:return: Deserialized DistributedPaillier scheme, local instance thereof, or a regular
Paillier scheme in case this party is not part of the distributed session.
"""
session_id = obj.get("session_id", None)
if isinstance(session_id, int):
if obj.get("distributed", False):
# The scheme should be stored in the local instances through the session ID
# If it is not, then this party was not part of the initial protocol
if session_id in DistributedPaillier._local_instances:
return DistributedPaillier._local_instances[session_id]
else:
# The scheme should be stored in the global instances through the session ID
# If it is not, then this party was not part of the initial protocol
index = obj.get("index", None)
if (
isinstance(index, int)
and session_id in DistributedPaillier._global_instances[index]
):
return DistributedPaillier._global_instances[index][session_id]
# This party is not part of the distributed session, so we parse it as a Paillier scheme
paillier_obj: Paillier.SerializedPaillier = {
"prec": obj["prec"],
"pubkey": obj["pubkey"],
}
return Paillier.deserialize(paillier_obj, origin=origin, **kwargs)
# endregion
# Load the serialization logic into the communication module
if "DistributedPaillier" not in Serialization.custom_deserialization_funcs:
Serialization.set_serialization_logic(DistributedPaillier, check_annotations=False)
```
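The registration in `_local_instances`/`_global_instances` above is what makes (de)serialization work: a party that took part in the key generation gets its own scheme object back instead of a reconstructed copy. A minimal sketch, assuming `scheme` is a `DistributedPaillier` produced by `from_security_parameter` in the current Python instance:
```python
serialized = scheme.serialize()
# -> {"session_id": ..., "distributed": ..., "index": ..., "prec": ..., "pubkey": ...}

restored = DistributedPaillier.deserialize(serialized)
# Parties that participated in the session get their registered instance back; a party that did
# not take part only sees the public material and ends up with a plain Paillier scheme instead.
assert restored is scheme
```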
#### File: distributed_keygen/test/test_distributed_keygen.py
```python
import asyncio
import math
from typing import AsyncGenerator, Tuple, Union, cast
import pytest
from _pytest.fixtures import FixtureRequest
from tno.mpc.communication import Pool, Serialization
from tno.mpc.communication.test import event_loop # pylint: disable=unused-import
from tno.mpc.communication.test.pool_fixtures_http import ( # pylint: disable=unused-import
fixture_pool_http_3p,
fixture_pool_http_4p,
fixture_pool_http_5p,
)
from tno.mpc.protocols.distributed_keygen import DistributedPaillier
@pytest.fixture(
name="pool_http",
params=[3, 4, 5],
ids=["3-party", "4-party", "5-party"],
scope="module",
)
async def fixture_pool_http(
request: FixtureRequest,
pool_http_3p: AsyncGenerator[Tuple[Pool, ...], None],
pool_http_4p: AsyncGenerator[Tuple[Pool, ...], None],
pool_http_5p: AsyncGenerator[Tuple[Pool, ...], None],
) -> AsyncGenerator[Tuple[Pool, ...], None]:
"""
Creates a collection of 3, 4 and 5 communication pools
:param pool_http_3p: Pool of 3 HTTP clients.
:param pool_http_4p: Pool of 4 HTTP clients.
:param pool_http_5p: Pool of 5 HTTP clients.
:param request: A fixture request used to indirectly parametrize.
:raise NotImplementedError: raised when based on the given param, no fixture can be created
:return: a collection of communication pools
"""
if request.param == 3: # type: ignore[attr-defined]
return pool_http_3p
if request.param == 4: # type: ignore[attr-defined]
return pool_http_4p
if request.param == 5: # type: ignore[attr-defined]
return pool_http_5p
raise NotImplementedError("This has not been implemented")
@pytest.fixture(
name="distributed_schemes",
params=list(range(2)),
ids=["corruption_threshold " + str(i) for i in range(2)],
scope="module",
)
@pytest.mark.asyncio
async def fixture_distributed_schemes(
pool_http: Tuple[Pool, ...],
request: FixtureRequest,
) -> Tuple[DistributedPaillier, ...]:
"""
Constructs schemes to use for distributed key generation.
:param pool_http: collection of communication pools
:param request: Fixture request
:return: a collection of schemes
"""
Serialization.custom_serialization_funcs.pop("DistributedPaillier")
Serialization.custom_deserialization_funcs.pop("DistributedPaillier")
Serialization.set_serialization_logic(DistributedPaillier, check_annotations=False)
corruption_threshold: int = request.param # type: ignore[attr-defined]
key_length = 64
prime_threshold = 200
correct_param_biprime = 20
stat_sec_shamir = 20
distributed_schemes = tuple(
await asyncio.gather(
*[
DistributedPaillier.from_security_parameter(
pool_http[i],
corruption_threshold,
key_length,
prime_threshold,
correct_param_biprime,
stat_sec_shamir,
distributed=False,
precision=8,
)
for i in range(len(pool_http))
]
)
)
return cast(
Tuple[DistributedPaillier, ...],
distributed_schemes,
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"plaintext", [1, 2, 3, -1, -2, -3, 1.5, 42.42424242, -1.5, -42.42424242]
)
async def test_distributed_paillier_with_communication(
distributed_schemes: Tuple[DistributedPaillier, ...],
plaintext: Union[float, int],
) -> None:
"""
Tests distributed encryption and decryption using communication
:param distributed_schemes: a collection of schemes
:param plaintext: plaintext to encrypt and decrypt
"""
enc = {0: distributed_schemes[0].encrypt(plaintext)}
for iplayer in range(1, len(distributed_schemes)):
player_name = "local" + str(iplayer)
await distributed_schemes[0].pool.send(player_name, enc[0])
enc[iplayer] = await distributed_schemes[iplayer].pool.recv("local0")
dec = await asyncio.gather(
*[
distributed_schemes[i].decrypt(enc[i])
for i in range(len(distributed_schemes))
]
)
assert all(d == plaintext for d in dec)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"plaintext", [1, 2, 3, -1, -2, -3, 1.5, 42.42424242, -1.5, -42.42424242]
)
async def test_distributed_paillier_serialization(
distributed_schemes: Tuple[DistributedPaillier, ...],
plaintext: Union[float, int],
) -> None:
"""
Tests serialization of the distributed Paillier.
:param distributed_schemes: a collection of schemes
:param plaintext: plaintext to encrypt
"""
enc = {0: distributed_schemes[0].encrypt(plaintext)}
for iplayer in range(1, len(distributed_schemes)):
player_name = "local" + str(iplayer)
await distributed_schemes[0].pool.send(player_name, enc[0])
await distributed_schemes[0].pool.send(player_name, distributed_schemes[0])
enc[iplayer] = await distributed_schemes[iplayer].pool.recv("local0")
d_scheme_recv = await distributed_schemes[iplayer].pool.recv("local0")
# check equality of received values
assert enc[0] == enc[iplayer]
assert d_scheme_recv == distributed_schemes[iplayer] == distributed_schemes[0]
@pytest.mark.asyncio
async def test_distributed_paillier_exception(pool_http: Tuple[Pool, ...]) -> None:
"""
Tests raising of exception when corruption threshold is set incorrectly.
:param pool_http: collection of communication pools
"""
max_corruption_threshold = math.ceil(len(pool_http) / 2) - 1
corruption_threshold = max_corruption_threshold + 1
key_length = 64
prime_threshold = 200
correct_param_biprime = 20
stat_sec_shamir = 20
with pytest.raises(ValueError):
_distributed_schemes = await asyncio.gather(
*[
DistributedPaillier.from_security_parameter(
pool_http[i],
corruption_threshold,
key_length,
prime_threshold,
correct_param_biprime,
stat_sec_shamir,
distributed=False,
)
for i in range(len(pool_http))
]
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"plaintext", [1, 2, 3, -1, -2, -3, 1.5, 42.42424242, -1.5, -42.42424242]
)
async def test_distributed_paillier_encrypt_decrypt(
distributed_schemes: Tuple[DistributedPaillier, ...],
plaintext: Union[float, int],
) -> None:
"""
Tests distributed encryption and decryption
:param distributed_schemes: a collection of schemes
:param plaintext: plaintext to encrypt and decrypt
"""
enc = distributed_schemes[0].encrypt(plaintext)
dec = await asyncio.gather(
*[distributed_schemes[i].decrypt(enc) for i in range(len(distributed_schemes))]
)
assert all(d == plaintext for d in dec)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"receivers_id,result_indices",
[
(0, (0,)),
(1, (0, 1)),
],
)
async def test_distributed_paillier_encrypt_decrypt_receivers(
distributed_schemes: Tuple[DistributedPaillier, ...],
receivers_id: int,
result_indices: Tuple[int],
) -> None:
"""
Tests distributed decryption revealing the results to a subset of receivers only.
:param distributed_schemes: a collection of schemes
:param receivers_id: parties to reveal the decryptions to
:param result_indices: indices of the parties that should have received the decryptions
"""
if receivers_id == 0:
receiver0_list = [["local0"]] * len(distributed_schemes)
receiver0_list[0] = ["self"]
receivers = tuple(receiver0_list)
elif receivers_id == 1:
receivers01_list = [["local0", "local1"]] * len(distributed_schemes)
receivers01_list[0] = ["self", "local1"]
receivers01_list[1] = ["local0", "self"]
receivers = tuple(receivers01_list)
enc = distributed_schemes[0].encrypt(42)
dec = await asyncio.gather(
*[
distributed_schemes[i].decrypt(enc, receivers=receivers[i])
for i in range(len(distributed_schemes))
]
)
for i in range(len(distributed_schemes)):
if i in result_indices:
assert dec[i] == 42
else:
assert dec[i] is None
``` |
{
"source": "JimenezJC/cozy-exchange",
"score": 2
} |
#### File: apps/profiles/admin.py
```python
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# from .models import Profile
#
# class ProfileInline(admin.StackedInline):
# """
#
# """
# model = Profile
# can_delete = False
# verbose_name_plural = 'Profile'
# fk_name = 'user'
#
# class CustomUserAdmin(UserAdmin):
# inlines = (ProfileInline, )
#
# def get_inline_instances(self, request, obj=None):
# if not obj:
# return list()
# return super(CustomUserAdmin, self).get_inline_instances(request, obj)
#
#
# admin.site.unregister(User)
# admin.site.register(User, CustomUserAdmin)
# class UserStripeAdmin(admin.ModelAdmin):
# class Meta:
# model = UserStripe
#
# admin.site.register(UserStripe, UserStripeAdmin)
```
#### File: apps/store/models.py
```python
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
from .manager import CategoryManager, SubCatergoryManager, ItemManager, TransactionManager
from django.db.models.signals import post_save
import datetime
class Category(models.Model):
"""This is a model for a predetermined list of clothing categories.
List:
Jackets, shirts, sweaters, sweatshirts, pants, t-shirts, hats, accessories, skate, bike, and other
Attributes:
name: A string of the name of a category
slug: A slug to make our links more presentable on the web app
"""
name = models.CharField(max_length=50, db_index=True)
slug = models.SlugField(max_length=50, db_index=True, unique=True)
objects = CategoryManager()
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
verbose_name = 'category'
verbose_name_plural = 'categories'
def get_absolute_url(self):
return reverse('store:item_list_by_category', args=[self.slug])
def save(self, **kwargs):
"""
This function is an override of the save function of Category
        so that we can automatically create a slug.
        Args:
            self: current instance of that object
"""
if not self.pk:
slug = self.name
slug = slug.lower()
slug = slug.replace(" ","-")
self.slug = slug
super(Category, self).save(**kwargs)
class SubCategory(models.Model):
"""
    This is a model for a list of clothing sub-categories that are either
    predetermined or user input.
Attributes:
name: A string of the name of a sub-category
parent: foreign key to the Category. the class SubCategory is a
child to category.
"""
name = models.CharField(max_length=16)
slug = models.SlugField(max_length = 16)
parent = models.ForeignKey(Category, related_name = 'subCats')
objects = SubCatergoryManager()
def __str__(self):
return self.name
def save(self, **kwargs):
"""
        This function is an override of the save function so that the SubCategory object
        will be automatically updated every time there is a change within the item.
Args:
self: current instance of that object
"""
if not self.pk:
slug = self.name
slug = slug.lower()
slug = slug.replace(" ","-")
self.slug = slug
super(SubCategory, self).save(**kwargs)
class Meta:
ordering = ('name',)
verbose_name = 'subcategory'
verbose_name_plural = 'subcategories'
# Needs to be moved to profile
# class Brand(models.Model):
# name = models.CharField(max_length=32)
# description = models.TextField(blank=True)
# location = models.CharField(max_length = 50)
# owner = models.ForeignKey('profiles.Profile')
# email = models.EmailField(blank=True)
#
# def _str_(self):
# return self.name
class Item(models.Model):
"""
This is a model for items sold on the exchange.
Attributes:
name: A string of the name of the item
slug: A slug to make our links more readable
description: A string which should describe the item for the users
        material: A string which should identify the materials used to make the item
        category: A foreign key to Category to make items more organized
        subCategory: A foreign key to SubCategory to make our items even more organized
        avgSoldPrice: A number representing the average price this item has been sold for
        lowestCurrListing: A number representing the lowest price among the currently available listings of this item. A query will be used to find this
        highestCurrListing: A number representing the highest price among the currently available listings of this item. A query will be used to find this
        lowestSoldListing: A number representing the lowest price this item has been sold for.
        highestSoldListing: A number representing the highest price this item has been sold for.
        lastActive: A date and time representing when the item was last edited
        available: A boolean which represents whether there are items available or not
        created: The date and time the item was created
        updated: The date and time the item was last updated
        stock: An integer that represents the number of items available for the user to buy
"""
name = models.CharField(max_length=200, db_index=True, unique = True)
slug = models.SlugField(max_length=200, db_index=True)
image = models.ImageField()
seller = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
# brand = models.ForeignKey(Brand)
description = models.TextField(blank=True)
#Change to location model
material = models.TextField(blank=True)
# related_name may not be needed. Research!!!
category = models.ForeignKey(Category, related_name='Item')
subCategory = models.ForeignKey(SubCategory)
location = models.CharField(max_length = 50)
price = models.DecimalField(
default=1.00,
validators=[MinValueValidator(1.0)],
decimal_places=2,
max_digits=10,
)
lastActive = models.DateTimeField(default = timezone.now)
visible = models.BooleanField(default = True)
stock = models.PositiveIntegerField(
default = 0
)
objects = ItemManager()
def save(self, **kwargs):
"""
        This function is an override of the save function so that the item object
        will be automatically updated every time there is a change within the item.
Args:
self: current instance of that object
"""
if not self.pk:
slug = self.name
slug = slug.lower()
slug = slug.replace(" ","-")
self.slug = slug
super(Item, self).save(**kwargs)
@property
def category_name(self):
return self.category.name
@property
def subCategory_name(self):
return self.subCategory.name
def __str__(self):
return self.name
class Meta:
ordering = ('-lastActive',)
index_together = (('id','slug'),)
verbose_name_plural = 'Items'
def get_absolute_url(self):
return reverse('store:item_detail', args=[self.id, self.slug])
def __unicode__(self):
        return '%d: %s' % (self.id, self.name)
# Add a save to transactions
class Transaction(models.Model):
"""
This is a model for transactions on the exchange.
Attributes:
seller: A foreign key to the user object that sold the items
buyer: A foreign key to a user object that is buying the items
amountExchanged: the amount of money exchanged between seller and
buyer
item = A foreign key to the item object to mark it as sold
deliveryAddress = a text field which contains the address of the
seller
receiveAddress = a text field which contains the address of the
buyer
ratingSeller = a decimal field which contains a rating for the
seller 1-5
ratingBuyer = a decimal field which contains a rating for the buyer
1-5
isValid = a boolean field which stores whether the transaction
will go through or cancel
"""
seller = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='Seller'
)
buyer = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='Buyer'
)
amountExchanged = models.DecimalField(
decimal_places=2,
max_digits=10
)
item = models.ForeignKey(Item, related_name = "Item")
deliveryAddress = models.TextField()
receiveAddress = models.TextField()
timeSold = models.DateTimeField(auto_now = True)
ratingSeller = models.DecimalField(
decimal_places = 1,
max_digits = 1
)
ratingBuyer = models.DecimalField(
decimal_places = 1,
max_digits = 1
)
isValid = models.BooleanField(default = True)
objects = TransactionManager()
```
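A minimal, framework-free sketch of the slug rule used by the `save()` overrides above (lowercase the name, replace spaces with hyphens); the helper name `make_slug` is illustrative and not part of the project.
```python
# Illustrative only: mirrors the slug logic in SubCategory.save() and Item.save().
def make_slug(name: str) -> str:
    return name.lower().replace(" ", "-")

assert make_slug("Red Wool Scarf") == "red-wool-scarf"
```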
#### File: apps/store/permissions.py
```python
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
message = 'You must be the owner of this object'
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return obj.seller == request.user
class IsBuyerOrSeller(BasePermission):
message = 'You must either be the buyer or the seller of this listing'
def has_object_permission(self,request,view,obj):
if request.method in SAFE_METHODS:
return True
return obj.seller == request.user or obj.buyer == request.user
``` |
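A small sketch of how `IsOwnerOrReadOnly` behaves, using `SimpleNamespace` stand-ins instead of real DRF request and model objects; the import path and stand-in names are assumptions based on the file layout above.
```python
from types import SimpleNamespace

from apps.store.permissions import IsOwnerOrReadOnly  # import path assumed from the file layout above

perm = IsOwnerOrReadOnly()
owner = SimpleNamespace(username="alice")
visitor = SimpleNamespace(username="bob")
listing = SimpleNamespace(seller=owner)

read_request = SimpleNamespace(method="GET", user=visitor)
write_request = SimpleNamespace(method="DELETE", user=visitor)

print(perm.has_object_permission(read_request, None, listing))   # True: safe methods are always allowed
print(perm.has_object_permission(write_request, None, listing))  # False: only the seller may modify
```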
{
"source": "jimenezjose/fusion-engine-client",
"score": 2
} |
#### File: python/examples/message_decode.py
```python
from argparse import ArgumentParser
import os
import sys
root_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(root_dir)
from fusion_engine_client.messages.core import MessagePayload
from fusion_engine_client.parsers import FusionEngineDecoder
def print_message(header, contents):
if isinstance(contents, MessagePayload):
parts = str(contents).split('\n')
parts[0] += ' [sequence=%d, size=%d B]' % (header.sequence_number, header.get_message_size())
print('\n'.join(parts))
else:
print('Decoded %s message [sequence=%d, size=%d B]' %
(header.get_type_string(), header.sequence_number, header.get_message_size()))
if __name__ == "__main__":
parser = ArgumentParser(description="""\
Decode and print the contents of messages contained in a *.p1log file or other
binary file containing FusionEngine messages. The binary file may also contain
other types of data.
""")
parser.add_argument('file', type=str, help="The path to a binary file to be read.")
options = parser.parse_args()
f = open(options.file, 'rb')
decoder = FusionEngineDecoder()
while True:
# Read the next message header.
data = f.read(1024)
if len(data) == 0:
break
# Decode the incoming data and print the contents of any complete messages.
messages = decoder.on_data(data)
for (header, message) in messages:
print_message(header, message)
```
#### File: fusion_engine_client/analysis/file_index.py
```python
from typing import Union
from collections import namedtuple
import io
import os
import numpy as np
from ..messages import MessageHeader, MessageType, Timestamp
from ..parsers import FusionEngineDecoder
FileIndexEntry = namedtuple('Element', ['time', 'type', 'offset'])
class FileIndexIterator(object):
def __init__(self, np_iterator):
self.np_iterator = np_iterator
def __next__(self):
if self.np_iterator is None:
raise StopIteration()
else:
entry = next(self.np_iterator)
return FileIndexEntry(time=Timestamp(entry[0]), type=MessageType(entry[1]), offset=entry[2])
class FileIndex(object):
"""!
@brief An index of FusionEngine message entries within a `.p1log` file used to facilitate quick access.
This class reads a `.p1i` file from disk containing FusionEngine message index entries. Each index entry includes
the P1 time of the message (if applicable), the @ref MessageType, and the message offset within the file (in bytes).
A @ref FileIndex instance may be used to quickly locate entries within a specific time range, or entries for one or
more message types, without having to parse the variable-length messages in the `.p1log` file itself.
@section file_index_examples Usage Examples
@subsection file_index_iterate Iterate Over Elements
@ref FileIndex supports two methods for accessing individual FusionEngine message entries. You can iterate
over the @ref FileIndex class itself, accessing one @ref FileIndexEntry object at a time:
```py
for entry in file_index:
log_file.seek(entry.offset, io.SEEK_SET)
...
```
Alternatively, you can access any of the `time`, `type`, or `offset` arrays directly. Each of these members returns
a NumPy `ndarray` object listing the P1 times (in seconds), @ref MessageType values, or byte offsets respectively:
```.py
for offset in file_index.offset:
log_file.seek(offset, io.SEEK_SET)
...
```
@subsection file_index_type Find All Messages For A Specific Type
@ref FileIndex supports slicing by a single @ref MessageType:
```py
for entry in file_index[MessageType.POSE]:
log_file.seek(entry.offset, io.SEEK_SET)
...
```
or by a list containing one or more @ref MessageType values:
```py
for entry in file_index[(MessageType.POSE, MessageType.GNSS_INFO)]:
log_file.seek(entry.offset, io.SEEK_SET)
...
```
@subsection file_index_time Find All Messages For A Specific Time Range
One of the most common uses is to search for messages within a specific time range. @ref FileIndex supports slicing
by P1 time using `Timestamp` objects or `float` values:
```py
for entry in file_index[Timestamp(2.0):Timestamp(5.0)]:
log_file.seek(entry.offset, io.SEEK_SET)
...
for entry in file_index[2.0:5.0]:
log_file.seek(entry.offset, io.SEEK_SET)
...
```
As with all Python `slice()` operations, the start time is inclusive and the stop time is exclusive. Either time may
be omitted to slice from the beginning or to the end of the data:
```py
for entry in file_index[:5.0]:
log_file.seek(entry.offset, io.SEEK_SET)
...
for entry in file_index[2.0:]:
log_file.seek(entry.offset, io.SEEK_SET)
...
```
@subsection file_index_by_index Access Messages By Index
Similar to @ref file_index_time "slicing by time", if desired you can access elements within a specific range of
indices within the file. For example, the following returns elements 2 through 7 in the file:
```py
for entry in file_index[2:8]:
log_file.seek(entry.offset, io.SEEK_SET)
...
```
"""
# Note: To reduce the index file size, we've made the following limitations:
# - Fractional timestamp is floored so time 123.4 becomes 123. The data read should not assume that an entry's
# timestamp is its exact time
_RAW_DTYPE = np.dtype([('int', '<u4'), ('type', '<u2'), ('offset', '<u8')])
_DTYPE = np.dtype([('time', '<f8'), ('type', '<u2'), ('offset', '<u8')])
def __init__(self, index_path: str = None, data_path: str = None, delete_on_error=True,
data: Union[np.ndarray, list] = None, t0: Timestamp = None):
"""!
@brief Construct a new @ref FileIndex instance.
@param index_path The path to a `.p1i` index file to be loaded.
@param data_path The path to the `.p1log` data file corresponding with `index_path`, used to validate the loaded
index entries. If `None`, defaults to `filename.p1log` if it exists.
@param delete_on_error If `True`, delete the index file if an error is detected before raising an exception.
Otherwise, leave the file unchanged.
@param data A NumPy `ndarray` or Python `list` containing information about each FusionEngine message in the
`.p1log` file. For internal use.
@param t0 The P1 time corresponding with the start of the `.p1log` file, if known. For internal use.
"""
if data is None:
self._data = None
else:
if isinstance(data, list):
self._data = np.array(data, dtype=FileIndex._DTYPE)
elif data.dtype == FileIndex._DTYPE:
self._data = data
else:
raise ValueError('Unsupported array format.')
if index_path is not None:
if self._data is None:
self.load(index_path=index_path, data_path=data_path, delete_on_error=delete_on_error)
else:
raise ValueError('Cannot specify both path and data.')
if t0 is not None:
self.t0 = t0
elif self._data is None:
self.t0 = None
else:
idx = np.argmax(~np.isnan(self._data['time']))
if idx >= 0:
self.t0 = Timestamp(self._data['time'][idx])
else:
self.t0 = None
def load(self, index_path, data_path=None, delete_on_error=True):
"""!
@brief Load a `.p1i` index file from disk.
@param index_path The path to the file to be read.
@param data_path The path to the `.p1log` data file corresponding with `index_path`, used to validate the loaded
index entries. If `None`, defaults to `filename.p1log` if it exists.
@param delete_on_error If `True`, delete the index file if an error is detected before raising an exception.
Otherwise, leave the file unchanged.
"""
if os.path.exists(index_path):
raw_data = np.fromfile(index_path, dtype=FileIndex._RAW_DTYPE)
self._data = FileIndex._from_raw(raw_data)
else:
raise FileNotFoundError("Index file '%s' does not exist." % index_path)
# If a .p1log data file exists for this index file, check that the data file size is consistent with the index.
# If the index doesn't cover the full binary file, the user might have interrupted the read when it was being
# generated, or they may have overwritten the .p1log file.
if data_path is None:
data_path = os.path.splitext(index_path)[0] + '.p1log'
if not os.path.exists(data_path):
# If the user didn't explicitly set data_path and the default file doesn't exist, it is not considered
# an error.
return
elif not os.path.exists(data_path):
raise ValueError("Specified data file '%s' not found." % data_path)
with open(data_path, 'rb') as data_file:
# Compute the data file size.
data_file.seek(0, io.SEEK_END)
data_file_size = data_file.tell()
data_file.seek(0, 0)
# Check for empty files.
if data_file_size == 0 and len(self) != 0:
if delete_on_error:
os.remove(index_path)
raise ValueError("Data file empty but index populated. [%d elements]" % len(self))
elif data_file_size != 0 and len(self) == 0:
if delete_on_error:
os.remove(index_path)
raise ValueError("Index file empty but data file not 0 length. [size=%d B]" % data_file_size)
# See if the index is larger than the data file.
last_offset = self.offset[-1]
if last_offset > data_file_size - MessageHeader.calcsize():
if delete_on_error:
os.remove(index_path)
raise ValueError("Last index entry past end of file. [size=%d B, start_offset=%d B]" %
(data_file_size, last_offset))
# Read the header of the last entry to get its size, then use that to compute the expected data file size
# from the offset in the last index entry.
data_file.seek(last_offset, io.SEEK_SET)
buffer = data_file.read(MessageHeader.calcsize())
data_file.seek(0, io.SEEK_SET)
header = MessageHeader()
header.unpack(buffer=buffer, warn_on_unrecognized=False)
message_size_bytes = MessageHeader.calcsize() + header.payload_size_bytes
index_size = last_offset + message_size_bytes
if index_size != data_file_size:
if delete_on_error:
os.remove(index_path)
raise ValueError("Size expected by index file does not match binary file. [size=%d B, expected=%d B]" %
(data_file_size, index_size))
def save(self, index_path):
"""!
@brief Save the contents of this index as a `.p1i` file.
@param index_path The path to the file to be written.
"""
if self._data is not None:
raw_data = FileIndex._to_raw(self._data)
if os.path.exists(index_path):
os.remove(index_path)
raw_data.tofile(index_path)
def __len__(self):
if self._data is None:
return 0
else:
return len(self._data['time'])
def __getattr__(self, key):
if key == 'time':
return self._data['time'] if self._data is not None else None
elif key == 'type':
return self._data['type'] if self._data is not None else None
elif key == 'offset':
return self._data['offset'] if self._data is not None else None
else:
raise AttributeError
def __getitem__(self, key):
# No data available.
if self._data is None:
return FileIndex()
# Key is a string (e.g., index['type']), defer to getattr() (e.g., index.type).
elif isinstance(key, str):
return getattr(self, key)
# Return entries for a specific message type.
elif isinstance(key, MessageType):
idx = self._data['type'] == key
return FileIndex(data=self._data[idx], t0=self.t0)
# Return entries for a list of message types.
elif isinstance(key, (set, list, tuple)) and len(key) > 0 and isinstance(key[0], MessageType):
idx = np.isin(self._data['type'], key)
return FileIndex(data=self._data[idx], t0=self.t0)
# Return a single element by index.
elif isinstance(key, int):
return FileIndex(data=self._data[key:(key + 1)], t0=self.t0)
# Key is a slice in time. Return a subset of the data.
elif isinstance(key, slice) and (isinstance(key.start, (Timestamp, float)) or
isinstance(key.stop, (Timestamp, float))):
# Time is continuous, so step sizes are not supported.
if key.step is not None:
raise ValueError('Step size not supported for time ranges.')
else:
start_idx = np.argmax(self._data['time'] >= key.start) if key.start is not None else 0
end_idx = np.argmax(self._data['time'] >= key.stop) if key.stop is not None else len(self._data)
return FileIndex(data=self._data[start_idx:end_idx], t0=self.t0)
# Key is an index slice or a list of individual element indices. Return a subset of the data.
else:
if isinstance(key, (set, list, tuple)):
key = np.array(key)
return FileIndex(data=self._data[key], t0=self.t0)
def __iter__(self):
if self._data is None:
return FileIndexIterator(None)
else:
return FileIndexIterator(iter(self._data))
@classmethod
def get_path(cls, data_path):
"""!
@brief Get the `.p1i` index file path corresponding with a FusionEngine `.p1log` file.
@param data_path The path to the `.p1log` file.
@return The corresponding `.p1i` file path.
"""
return os.path.splitext(data_path)[0] + '.p1i'
@classmethod
def _from_raw(cls, raw_data):
idx = raw_data['int'] == Timestamp._INVALID
data = raw_data.astype(dtype=cls._DTYPE)
data['time'][idx] = np.nan
return data
@classmethod
def _to_raw(cls, data):
time_sec = data['time']
idx = np.isnan(time_sec)
raw_data = data.astype(dtype=cls._RAW_DTYPE)
raw_data['int'][idx] = Timestamp._INVALID
return raw_data
class FileIndexBuilder(object):
"""!
@brief Helper class for constructing a @ref FileIndex.
This class can be used to construct a @ref FileIndex and a corresponding `.p1i` file when reading a `.p1log` file.
"""
def __init__(self):
self.raw_data = []
def from_file(self, data_path: str):
"""!
@brief Construct a @ref FileIndex from an existing `.p1log` file.
@param data_path The path to the `.p1log` file.
@return The generated @ref FileIndex instance.
"""
decoder = FusionEngineDecoder(return_offset=True)
with open(data_path, 'rb') as f:
    # Read the file in chunks and index all messages found in each chunk.
    while True:
        data = f.read(65536)
        if len(data) == 0:
            break
        messages = decoder.on_data(data)
        for (header, message, offset_bytes) in messages:
            p1_time = message.__dict__.get('p1_time', None)
            self.append(message_type=header.message_type, offset_bytes=offset_bytes, p1_time=p1_time)
return self.to_index()
def append(self, message_type: MessageType, offset_bytes: int, p1_time: Timestamp = None):
"""!
@brief Add an entry to the index data being accumulated.
@param message_type The type of the FusionEngine message.
@param offset_bytes The offset of the message within the `.p1log` file (in bytes).
@param p1_time The P1 time of the message, or `None` if the message does not have P1 time.
"""
if p1_time is None:
time_sec = np.nan
else:
time_sec = float(p1_time)
self.raw_data.append((time_sec, int(message_type), offset_bytes))
def save(self, index_path):
"""!
@brief Save the contents of the generated index as a `.p1i` file.
@param index_path The path to the file to be written.
"""
index = self.to_index()
index.save(index_path)
return index
def to_index(self):
"""!
@brief Construct a @ref FileIndex from the current set of data.
@return The generated @ref FileIndex instance.
"""
return FileIndex(data=self.raw_data)
def __len__(self):
return len(self.raw_data)
```
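A short usage sketch tying `FileIndexBuilder` and `FileIndex` together; the log path below is a placeholder.
```python
import io

from fusion_engine_client.analysis.file_index import FileIndex, FileIndexBuilder
from fusion_engine_client.messages import MessageType

# Build and save an index for an existing log (path is illustrative).
index = FileIndexBuilder().from_file('example.p1log')
index.save(FileIndex.get_path('example.p1log'))

# Later, use the index to seek directly to pose messages in the log.
with open('example.p1log', 'rb') as f:
    for entry in index[MessageType.POSE]:
        f.seek(entry.offset, io.SEEK_SET)
        # ... read and decode the message starting at this offset ...
```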
#### File: fusion_engine_client/utils/construct_utils.py
```python
from construct import Adapter, Enum
class NamedTupleAdapter(Adapter):
"""!
@brief Adapter for automatically converting between construct streams and
NamedTuples with corresponding fields.
Usage Example:
```{.py}
class VersionTuple(NamedTuple):
major: int
minor: int
VersionRawConstruct = Struct(
"major" / Int8ul,
"minor" / Int16ul,
)
VersionConstruct = NamedTupleAdapter(VersionTuple, VersionRawConstruct)
UserConfigConstruct = Struct(
"version" / VersionConstruct,
"thing2" / Int32ul,
)
UserConfigConstruct.build({'version': VersionTuple(2, 3), 'thing2': 4})
```
"""
def __init__(self, tuple_cls, *args):
"""!
@brief Create an adapter for (de)serializing NamedTuples.
@param tuple_cls The NamedTuple to adapt.
"""
super().__init__(*args)
self.tuple_cls = tuple_cls
def _decode(self, obj, context, path):
# skip _io member
return self.tuple_cls(*list(obj.values())[1:])
def _encode(self, obj, context, path):
return obj._asdict()
class ClassAdapter(Adapter):
"""!
@brief Adapter for automatically converting between construct streams and
a class with corresponding fields.
Usage Example:
```{.py}
class VersionClass:
def __init__(self, major=0, minor=0):
self.major = major
self.minor = minor
VersionRawConstruct = Struct(
"major" / Int8ul,
"minor" / Int16ul,
)
VersionConstruct = ClassAdapter(VersionClass, VersionRawConstruct)
UserConfigConstruct = Struct(
"version" / VersionConstruct,
"thing2" / Int32ul,
)
UserConfigConstruct.build({'version': VersionClass(2, 3), 'thing2': 4})
```
"""
def __init__(self, cls, *args):
"""!
@brief Create an adapter for (de)serializing a class.
@param cls The class to adapt.
"""
super().__init__(*args)
self.cls = cls
def _decode(self, obj, context, path):
val = self.cls()
val.__dict__.update(obj)
return val
def _encode(self, obj, context, path):
return obj.__dict__
class EnumAdapter(Adapter):
"""!
@brief Adapter for automatically converting between construct Enum and
python Enums.
Usage Example:
```{.py}
class ConfigType(IntEnum):
FOO = 0
BAR = 1
ConfigConstruct = EnumAdapter(ConfigType, Enum(Int32ul, ConfigType))
UserConfigConstruct = Struct(
"config_type" / ConfigConstruct,
)
data = UserConfigConstruct.build({'config_type': ConfigType.BAR})
assert ConfigType.BAR == UserConfigConstruct.parse(data).config_type
```
"""
def __init__(self, enum_cls, *args):
"""!
@brief Create an adapter for (de)serializing Enums.
@param enum_cls The Enum to adapt.
"""
super().__init__(*args)
self.enum_cls = enum_cls
def _decode(self, obj, context, path):
return self.enum_cls(int(obj))
def _encode(self, obj, context, path):
return obj
def AutoEnum(construct_cls, enum_cls):
"""!
@brief Wrapper for @ref EnumAdapter to make its arguments simpler.
Usage Example:
```{.py}
class ConfigType(IntEnum):
FOO = 0
BAR = 1
UserConfigConstruct = Struct(
"config_type" / AutoEnum(Int32ul, ConfigType),
)
data = UserConfigConstruct.build({'config_type': ConfigType.BAR})
assert ConfigType.BAR == UserConfigConstruct.parse(data).config_type
```
"""
return EnumAdapter(enum_cls, Enum(construct_cls, enum_cls))
```
#### File: fusion_engine_client/utils/trace.py
```python
import logging
import sys
__all__ = []
# Define Logger TRACE level and associated trace() function if it doesn't exist.
if not hasattr(logging, 'TRACE'):
logging.TRACE = logging.DEBUG - 1
if sys.version_info.major == 2:
logging._levelNames['TRACE'] = logging.TRACE
logging._levelNames[logging.TRACE] = 'TRACE'
else:
logging._nameToLevel['TRACE'] = logging.TRACE
logging._levelToName[logging.TRACE] = 'TRACE'
def trace(self, msg, *args, **kwargs):
self.log(logging.TRACE, msg, *args, **kwargs)
logging.Logger.trace = trace
```
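A brief usage sketch (logger name is illustrative): importing this module registers the TRACE level, after which any logger exposes `trace()`.
```python
import logging

import fusion_engine_client.utils.trace  # noqa: F401  (registers the TRACE level)

logging.basicConfig(level=logging.TRACE)
logger = logging.getLogger('point_one.example')
logger.trace('Parsed %d bytes', 128)
```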
#### File: python/tests/test_config.py
```python
import pytest
from fusion_engine_client.messages.configuration import ConfigurationSource, DeviceCourseOrientationConfig, InterfaceID, TransportType
from fusion_engine_client.messages import (SetConfigMessage,
Uart1BaudConfig,
ConfigType,
Direction,
ConfigResponseMessage,
GnssLeverArmConfig,
InvalidConfig,
OutputInterfaceConfigResponseMessage,
OutputInterfaceConfig)
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('point_one').setLevel(logging.DEBUG)
def test_set_config():
BASE_SIZE = 8
set_msg = SetConfigMessage(DeviceCourseOrientationConfig(Direction.BACKWARD, Direction.DOWN))
assert len(set_msg.pack()) == BASE_SIZE + 4
set_msg = SetConfigMessage(GnssLeverArmConfig(1, 2, 3))
assert len(set_msg.pack()) == BASE_SIZE + 12
set_msg = SetConfigMessage()
set_msg.config_object = Uart1BaudConfig(9600)
uart_data = set_msg.pack()
assert len(uart_data) == BASE_SIZE + 4
set_msg = SetConfigMessage()
set_msg.unpack(uart_data)
assert isinstance(set_msg.config_object, Uart1BaudConfig)
assert set_msg.config_object.GetType() == ConfigType.UART1_BAUD
assert set_msg.config_object.value == 9600
def test_bad_set_config():
BASE_SIZE = 8
set_msg = SetConfigMessage()
with pytest.raises(TypeError):
set_msg.pack()
set_msg = SetConfigMessage()
set_msg.config_object = "Dummy"
with pytest.raises(TypeError):
set_msg.pack()
set_msg = SetConfigMessage()
set_msg.config_object = InvalidConfig()
uart_data = set_msg.pack()
assert len(uart_data) == BASE_SIZE
set_msg = SetConfigMessage()
set_msg.unpack(uart_data)
assert isinstance(set_msg.config_object, InvalidConfig)
assert set_msg.config_object.GetType() == ConfigType.INVALID
def test_config_data():
BASE_SIZE = 12
data_msg = ConfigResponseMessage()
data_msg.config_object = GnssLeverArmConfig(1, 2, 3)
assert len(data_msg.pack()) == BASE_SIZE + 12
data_msg = ConfigResponseMessage()
data_msg.config_object = Uart1BaudConfig(9600)
data_msg.config_source = ConfigurationSource.SAVED
uart_data = data_msg.pack()
assert len(uart_data) == BASE_SIZE + 4
data_msg = ConfigResponseMessage()
data_msg.unpack(uart_data)
assert isinstance(data_msg.config_object, Uart1BaudConfig)
assert data_msg.config_object.GetType() == ConfigType.UART1_BAUD
assert data_msg.config_object.value == 9600
assert data_msg.config_source == ConfigurationSource.SAVED
def test_output_interface_data():
data_msg = OutputInterfaceConfigResponseMessage()
data_msg.output_interface_data = [
OutputInterfaceConfig(InterfaceID(TransportType.SERIAL, 0), [1, 2]),
]
packed_data = data_msg.pack()
assert len(data_msg.pack()) == 4 + 4 + 4 + 2
data_msg = OutputInterfaceConfigResponseMessage()
data_msg.output_interface_data = [
OutputInterfaceConfig(InterfaceID(TransportType.SERIAL, 0), [1, 2]),
OutputInterfaceConfig(InterfaceID(TransportType.SERIAL, 1), [1])
]
data_msg.config_source = ConfigurationSource.SAVED
packed_data = data_msg.pack()
assert len(data_msg.pack()) == 4 + (4 + 4 + 2) + (4 + 4 + 1)
data_msg = OutputInterfaceConfigResponseMessage()
data_msg.unpack(packed_data)
assert data_msg.config_source == ConfigurationSource.SAVED
assert len(data_msg.output_interface_data) == 2
assert data_msg.output_interface_data[0].output_interface == InterfaceID(TransportType.SERIAL, 0)
assert data_msg.output_interface_data[0].stream_indices == [1, 2]
assert data_msg.output_interface_data[1].output_interface == InterfaceID(TransportType.SERIAL, 1)
assert data_msg.output_interface_data[1].stream_indices == [1]
```
#### File: python/tests/test_file_index.py
```python
import os
import numpy as np
import pytest
from fusion_engine_client.analysis.file_index import FileIndex, FileIndexBuilder
from fusion_engine_client.messages import MessageType, Timestamp, message_type_to_class
from fusion_engine_client.parsers import FusionEngineEncoder
RAW_DATA = [
(None, MessageType.VERSION_INFO, 0),
(Timestamp(1.0), MessageType.POSE, 10),
(Timestamp(2.0), MessageType.POSE, 20),
(Timestamp(2.0), MessageType.GNSS_INFO, 30),
(None, MessageType.VERSION_INFO, 40),
(Timestamp(3.0), MessageType.POSE, 50),
(Timestamp(4.0), MessageType.POSE, 60),
]
def _test_time(time, raw_data):
raw_time = [e[0] for e in raw_data]
raw_is_none = [e is None for e in raw_time]
idx = np.logical_or(time == raw_time, np.logical_and(np.isnan(time), raw_is_none))
return idx.all()
def test_index():
index = FileIndex(data=RAW_DATA)
assert len(index) == len(RAW_DATA)
raw = [e for e in RAW_DATA if e[1] == MessageType.POSE]
idx = index.type == MessageType.POSE
assert np.sum(idx) == len(raw)
assert _test_time(index.time[idx], raw)
assert (index.offset[idx] == [e[2] for e in raw]).all()
raw = [e for e in RAW_DATA if e[1] == MessageType.VERSION_INFO]
idx = index.type == MessageType.VERSION_INFO
assert _test_time(index.time[idx], raw)
assert (index.offset[idx] == [e[2] for e in raw]).all()
def test_iterator():
index = FileIndex(data=RAW_DATA)
for i, entry in enumerate(index):
assert entry.type == RAW_DATA[i][1]
def test_type_slice():
index = FileIndex(data=RAW_DATA)
pose_index = index[MessageType.POSE]
raw = [e for e in RAW_DATA if e[1] == MessageType.POSE]
assert len(pose_index) == len(raw)
assert (pose_index.offset == [e[2] for e in raw]).all()
pose_index = index[(MessageType.POSE, MessageType.GNSS_INFO)]
raw = [e for e in RAW_DATA if e[1] == MessageType.POSE or e[1] == MessageType.GNSS_INFO]
assert len(pose_index) == len(raw)
assert (pose_index.offset == [e[2] for e in raw]).all()
def test_index_slice():
index = FileIndex(data=RAW_DATA)
# Access a single element.
sliced_index = index[3]
raw = [RAW_DATA[3]]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
# Access to the end.
sliced_index = index[3:]
raw = RAW_DATA[3:]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
# Access from the beginning.
sliced_index = index[:3]
raw = RAW_DATA[:3]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
# Access a range.
sliced_index = index[2:4]
raw = RAW_DATA[2:4]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
# Access individual indices.
sliced_index = index[(2, 3, 5)]
raw = [RAW_DATA[i] for i in (2, 3, 5)]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
def test_time_slice():
def _lower_bound(time):
return next(i for i, e in enumerate(RAW_DATA) if (e[0] is not None and e[0] >= time))
index = FileIndex(data=RAW_DATA)
# Access to the end.
sliced_index = index[2.0:]
raw = RAW_DATA[_lower_bound(2.0):]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
# Access from the beginning.
sliced_index = index[:3.0]
raw = RAW_DATA[:_lower_bound(3.0)]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
# Access a range.
sliced_index = index[2.0:3.0]
raw = RAW_DATA[_lower_bound(2.0):_lower_bound(3.0)]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
# Access by Timestamp.
sliced_index = index[Timestamp(2.0):Timestamp(3.0)]
raw = RAW_DATA[_lower_bound(2.0):_lower_bound(3.0)]
assert _test_time(sliced_index.time, raw)
assert (sliced_index.offset == [e[2] for e in raw]).all()
def test_empty_index():
index = FileIndex()
assert len(index) == 0
assert index.time is None
def test_builder(tmpdir):
builder = FileIndexBuilder()
for entry in RAW_DATA:
builder.append(p1_time=entry[0], message_type=entry[1], offset_bytes=entry[2])
assert len(builder) == len(RAW_DATA)
index = builder.to_index()
assert len(index) == len(RAW_DATA)
index_path = tmpdir.join('index.p1i')
index.save(index_path)
assert os.path.exists(index_path)
assert os.path.getsize(index_path) > 0
@pytest.fixture
def data_path(tmpdir):
prefix = tmpdir.join('my_data')
# Construct a binary data file and a corresponding index.
data_path = prefix + '.p1log'
index_path = prefix + '.p1i'
builder = FileIndexBuilder()
encoder = FusionEngineEncoder()
with open(data_path, 'wb') as f:
for entry in RAW_DATA:
builder.append(p1_time=entry[0], message_type=entry[1], offset_bytes=f.tell())
cls = message_type_to_class[entry[1]]
message = cls()
if entry[0] is not None and hasattr(message, 'p1_time'):
message.p1_time = entry[0]
f.write(encoder.encode_message(message))
builder.save(index_path)
return data_path
def test_validate_good(data_path):
index_path = FileIndex.get_path(data_path)
index = FileIndex(index_path=index_path, data_path=data_path)
assert len(index) == len(RAW_DATA)
def test_validate_index_empty(data_path):
index_path = FileIndex.get_path(data_path)
# Clear the index file.
with open(index_path, 'wb'):
pass
with pytest.raises(ValueError):
index = FileIndex(index_path=index_path, data_path=data_path)
def test_validate_data_file_empty(data_path):
index_path = FileIndex.get_path(data_path)
# Clear the data file.
with open(data_path, 'wb'):
pass
with pytest.raises(ValueError):
index = FileIndex(index_path=index_path, data_path=data_path)
def test_validate_index_too_small(data_path):
index_path = FileIndex.get_path(data_path)
# Strip one entry from the index file.
file_size = os.path.getsize(index_path)
with open(index_path, 'rb+') as f:
f.truncate(file_size - FileIndex._RAW_DTYPE.itemsize)
with pytest.raises(ValueError):
index = FileIndex(index_path=index_path, data_path=data_path)
def test_validate_data_too_small(data_path):
index_path = FileIndex.get_path(data_path)
# Strip bytes from the end of the data file.
file_size = os.path.getsize(data_path)
with open(data_path, 'rb+') as f:
f.truncate(file_size - 10)
with pytest.raises(ValueError):
index = FileIndex(index_path=index_path, data_path=data_path)
def test_validate_data_too_large(data_path):
index_path = FileIndex.get_path(data_path)
# Append extra bytes to the end of the data file.
file_size = os.path.getsize(data_path)
with open(data_path, 'ab') as f:
f.write(b'abcd')
with pytest.raises(ValueError):
index = FileIndex(index_path=index_path, data_path=data_path)
```
#### File: python/tests/test_file_reader.py
```python
import numpy as np
from fusion_engine_client.analysis.file_reader import FileReader, MessageData, TimeAlignmentMode
from fusion_engine_client.messages import *
def setup():
data = {
PoseMessage.MESSAGE_TYPE: MessageData(PoseMessage.MESSAGE_TYPE, None),
PoseAuxMessage.MESSAGE_TYPE: MessageData(PoseAuxMessage.MESSAGE_TYPE, None),
GNSSInfoMessage.MESSAGE_TYPE: MessageData(GNSSInfoMessage.MESSAGE_TYPE, None),
}
message = PoseMessage()
message.p1_time = Timestamp(1.0)
message.velocity_body_mps = np.array([1.0, 2.0, 3.0])
data[PoseMessage.MESSAGE_TYPE].messages.append(message)
message = PoseMessage()
message.p1_time = Timestamp(2.0)
message.velocity_body_mps = np.array([4.0, 5.0, 6.0])
data[PoseMessage.MESSAGE_TYPE].messages.append(message)
message = PoseAuxMessage()
message.p1_time = Timestamp(2.0)
message.velocity_enu_mps = np.array([14.0, 15.0, 16.0])
data[PoseAuxMessage.MESSAGE_TYPE].messages.append(message)
message = PoseAuxMessage()
message.p1_time = Timestamp(3.0)
message.velocity_enu_mps = np.array([17.0, 18.0, 19.0])
data[PoseAuxMessage.MESSAGE_TYPE].messages.append(message)
message = GNSSInfoMessage()
message.p1_time = Timestamp(2.0)
message.gdop = 5.0
data[GNSSInfoMessage.MESSAGE_TYPE].messages.append(message)
message = GNSSInfoMessage()
message.p1_time = Timestamp(3.0)
message.gdop = 6.0
data[GNSSInfoMessage.MESSAGE_TYPE].messages.append(message)
return data
def test_time_align_drop():
data = setup()
FileReader.time_align_data(data, TimeAlignmentMode.DROP)
assert len(data[PoseMessage.MESSAGE_TYPE].messages) == 1
assert float(data[PoseMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert len(data[PoseAuxMessage.MESSAGE_TYPE].messages) == 1
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert len(data[GNSSInfoMessage.MESSAGE_TYPE].messages) == 1
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
def test_time_align_insert():
data = setup()
FileReader.time_align_data(data, TimeAlignmentMode.INSERT)
assert len(data[PoseMessage.MESSAGE_TYPE].messages) == 3
assert float(data[PoseMessage.MESSAGE_TYPE].messages[0].p1_time) == 1.0
assert float(data[PoseMessage.MESSAGE_TYPE].messages[1].p1_time) == 2.0
assert float(data[PoseMessage.MESSAGE_TYPE].messages[2].p1_time) == 3.0
assert data[PoseMessage.MESSAGE_TYPE].messages[0].velocity_body_mps[0] == 1.0
assert data[PoseMessage.MESSAGE_TYPE].messages[1].velocity_body_mps[0] == 4.0
assert np.isnan(data[PoseMessage.MESSAGE_TYPE].messages[2].velocity_body_mps[0])
assert len(data[PoseAuxMessage.MESSAGE_TYPE].messages) == 3
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].p1_time) == 1.0
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[1].p1_time) == 2.0
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[2].p1_time) == 3.0
assert np.isnan(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].velocity_enu_mps[0])
assert data[PoseAuxMessage.MESSAGE_TYPE].messages[1].velocity_enu_mps[0] == 14.0
assert data[PoseAuxMessage.MESSAGE_TYPE].messages[2].velocity_enu_mps[0] == 17.0
assert len(data[GNSSInfoMessage.MESSAGE_TYPE].messages) == 3
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].p1_time) == 1.0
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[1].p1_time) == 2.0
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[2].p1_time) == 3.0
assert np.isnan(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].gdop)
assert data[GNSSInfoMessage.MESSAGE_TYPE].messages[1].gdop == 5.0
assert data[GNSSInfoMessage.MESSAGE_TYPE].messages[2].gdop == 6.0
def test_time_align_specific():
data = setup()
FileReader.time_align_data(data, TimeAlignmentMode.DROP,
message_types=[PoseMessage.MESSAGE_TYPE, GNSSInfoMessage.MESSAGE_TYPE])
assert len(data[PoseMessage.MESSAGE_TYPE].messages) == 1
assert float(data[PoseMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert len(data[PoseAuxMessage.MESSAGE_TYPE].messages) == 2
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[1].p1_time) == 3.0
assert len(data[GNSSInfoMessage.MESSAGE_TYPE].messages) == 1
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
``` |
{
"source": "jimenezjose/Phoenix",
"score": 3
} |
#### File: resources/systems/__init__.py
```python
from .status import HostnameStatus
from api.db import get_table
from flask_restful import (
Resource,
reqparse)
class Systems(Resource):
"""System resource for hostnames organization and details."""
def get(self):
"""GET request for all systems.
Returns:
Dictionary of data in hostnames table.
Success:
Status Code: 200 OK
* hostnames table returned.
"""
# query for all hostnames in the database
hostnames_table = get_table('hostnames')
return {'hostnames' : hostnames_table}, 200
@staticmethod
def add_all_resources(api, path):
"""Recursively adds all sub-resources in the 'system' resource.
Args:
api: flask_restful Api object.
path: string path for current resource. Example: 'api/systems'
"""
# register systems as an api resource
api.add_resource(Systems, path)
# directly add sub-resources of system
HostnameStatus.add_all_resources(api, '{}/<string:hostname_status>'.format(path))
```
#### File: hostname/history/__init__.py
```python
from api.db import (
add_filter_query_parameters,
get_table,
parse_filter_query_parameters,
validate,
zip_params)
from flask_restful import (
Resource,
reqparse)
class TestHistory(Resource):
"""History resource to fetch all running tests, queued tests, and test history."""
def get(self, hostname_status, hostname):
"""GET request for all info on tests_runs associated with the hostname.
Args:
hostname: string name of system hostname passed through url.
Returns:
Table dictionary of tests_runs with applied query parameters for filtering.
Success:
Status Code: 200 OK
* return tests runs information
Failure:
Status Code: 404 Not Found
* Invalid url - invalid hostname_status or hostname
"""
validate(hostname_status=hostname_status, hostname=hostname, http_error_code=404)
parser = reqparse.RequestParser()
add_filter_query_parameters(parser, 'tests_runs')
args = parser.parse_args()
filter = parse_filter_query_parameters(args, 'tests_runs')
# overwrite filter with static info from uri
static_params = zip_params(
hostname=hostname,
hostname_status=hostname_status
)
filter.update(static_params)
# query for filtered test-history
tests_runs = get_table('tests_runs', constraints=filter)
return {'tests_runs' : tests_runs}, 200
@staticmethod
def add_all_resources(api, path):
"""Recursively adds all sub-resources in the 'test' endpoint.
Args:
api: flask_restful Api object.
path: string path for current resource. Example: 'api/systems/.../test'
"""
# register tests as an api resource with the given path
api.add_resource(TestHistory, path)
# directly add sub-resources of 'test-history' <HERE>
```
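A hypothetical client call against the `TestHistory` resource; the base URL, mount path, and filter column below are placeholders, since the actual route is supplied by the caller of `add_all_resources`.
```python
import requests

resp = requests.get(
    'http://localhost:5000/api/systems/active/example-host/history',  # placeholder route
    params={'tests_name': 'smoke'},  # presumably any tests_runs column can be used as a filter
)
print(resp.status_code)          # 200 on success, 404 for an unknown hostname/status
print(resp.json()['tests_runs'])
```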
#### File: systems/status/__init__.py
```python
from .hostname import Hostname
from api.db import (
delete_hostname,
get_running_tests,
get_table,
insert_hostname,
is_retired,
validate)
from flask_restful import (
abort,
Resource,
reqparse)
class HostnameStatus(Resource):
"""Statusflag that annotates a hostname system as 'retired' or 'active'."""
def get(self, hostname_status):
"""GET request for all systems labeled as given by the 'hostname_status'
Args:
hostname_status: string annotation of a system for a binary representation of an 'active'
or 'retired' hostname.
Returns:
Dictionary of the hostnames table filtered by hostname_status.
Success:
Status Code: 200 OK
* valid hostname_status provided.
Failure:
Status Code: 404 Not Found
* hostname_status provided unknown - not {active, retired} - invalid url.
"""
validate(hostname_status=hostname_status, http_error_code=404)
# Get all hostnames with given retired hostname status
hostnames_table = get_table('hostnames', hostname_status=hostname_status)
return {'hostnames' : hostnames_table}, 200
def post(self, hostname_status):
"""POST request to add a hostname to the database.
Args:
hostname_status: string annotation of a system for a binary representation of an 'active' or 'retired' hostname.
Returns:
Dictionary of inserted hostname with keys coinciding column names of the table hostnames.
Success:
Status Code: 201 Created
* hostname inserted into the database.
Failure:
Status Code: 404 Not Found
* status provided does not exist.
* required arguments not supplied in request.
Status Code: 405 Method Not Allowed
* attempt to add a non-active hostname not allowed.
Status Code: 409 Conflict
* duplicate insertion for active hostname not allowed.
"""
validate(hostname_status=hostname_status, http_error_code=404)
# require 'hostname' parameter from request.
parser = reqparse.RequestParser()
parser.add_argument('hostname', type=str, required=True)
args = parser.parse_args()
if is_retired(hostname_status):
# Hostnames added to the database must be active, return status code 405 Method Not Allowed
return {'message' : 'The method is not allowed for the requested URL.'}, 405
# check if working hostname already exists in db.
existing_hostname = get_table('hostnames', hostname=args['hostname'], hostname_status=hostname_status)
if existing_hostname:
# active hostname already exists, returning conflict status code 409.
return {'message' : '{} hostname, {}, already exists. Insertion not allowed.'.format(hostname_status, args['hostname'])}, 409
# otherwise, insert hostname into db.
inserted_hostname = insert_hostname(args['hostname'])
return inserted_hostname
# take off hostname id for simplicity of function definition TODO
def delete(self, hostname_status):
"""DELETE the hostname by setting the retired flag to True.
Args:
hostname_status: string annotation of a system to show retired status.
Returns:
Dictionary of deleted hostname with keys coinciding the column names of table hostnames.
Success:
Status Code: 200 OK
* hostname deleted.
Failure:
Status Code: 404 Not Found
* invalid url - hostname_status is not valid.
Status Code: 400 Bad Request
* no parameters were passed in the JSON body to the request.
Status Code: 405 Method Not Allowed
* attempt to do a DELETE request on invalid hostname_status in url
Status Code: 409 Conflict
* hostname did not exist in the database.
* hostname is marked as retired in the database.
* active hostname is busy with running tests.
"""
validate(hostname_status=hostname_status, http_error_code=404)
if is_retired(hostname_status):
# only remove active hostnames; return 405 Method Not Allowed
return {'message' : 'The method is not allowed for the requested URL.'}, 405
# require 'hostname' parameter from request.
parser = reqparse.RequestParser()
parser.add_argument('hostname', type=str)
parser.add_argument('hostnames_id', type=int)
args = parser.parse_args()
if args['hostname'] is None and args['hostnames_id'] is None:
# at least one argument is required otherwise throw 400 Bad Request.
errors = {
'hostname' : 'Missing parameter in JSON body',
'hostnames_id' : 'Missing parameter in JSON body',
'message' : 'At least one parameter is required',
}
abort(400, message=errors)
# validate that hostname info exists in the db
validate(
hostname=args['hostname'],
hostnames_id=args['hostnames_id'],
http_error_code=409
)
# validate that the hostname is active
active_hostname = get_table(
'hostnames',
hostname=args['hostname'],
hostnames_id=args['hostnames_id'],
hostname_status=hostname_status
)
if not active_hostname:
# hostname is not active - validation check failed.
supplied_args = {key : value for key, value in args.items() if value is not None}
error_msg = 'No active hostname found with supplied args: {}'.format(supplied_args)
abort(409, message=error_msg)
# if hostname is running tests - abort DELETE request
running_tests = get_running_tests(hostname=args['hostname'], hostnames_id=args['hostnames_id'])
if running_tests:
# system currently running tests - throw 409 Conflict
error_msg = 'System is Busy. Currently processing {} tests.'.format(len(running_tests))
errors = {args['hostname'] : error_msg}
abort(409, message=errors)
# internally hostnames_id takes precedence over hostname string
hostnames_deleted = delete_hostname(hostnames_id=args['hostnames_id'], hostname=args['hostname'])
return {'hostnames' : hostnames_deleted}, 200
@staticmethod
def add_all_resources(api, path):
"""Recursively adds all sub-resources in the 'system/<string:hostname_status>' resource.
Args:
api: flask_restful Api object.
path: string path for current resource. Example: 'api/systems/active'
"""
# register systems as an api resource
api.add_resource(HostnameStatus, path)
# directly add sub-resources of systems/<string:hostname_status>
Hostname.add_all_resources(api, '{}/<string:hostname>'.format(path))
``` |
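Hypothetical client calls exercising the `HostnameStatus` handlers above; the base URL and hostname are placeholders.
```python
import requests

base = 'http://localhost:5000/api/systems/active'  # assumed mount point

requests.post(base, json={'hostname': 'lab-node-01'})    # 201 on insert, 409 if it already exists
requests.delete(base, json={'hostname': 'lab-node-01'})  # 200 if idle, 409 if it is running tests
```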
{
"source": "jimenezl/dotfiles",
"score": 3
} |
#### File: jimenezl/dotfiles/script.py
```python
from subprocess import call
import sys
import os
def main():
DOTFILES_DIR = os.path.dirname(os.path.abspath(__file__))
dot_config = DOTFILES_DIR + "/../.config"
list_of_configs_file = open(
DOTFILES_DIR + '/config/list_of_configs.txt', 'r')
list_of_configs = []
for line in list_of_configs_file:
list_of_configs.append(line.strip())
for program_name in list_of_configs:
call(
["rsync", "-r", dot_config + "/" + program_name, DOTFILES_DIR + "/config"])
home_dir_configs_file = open(
DOTFILES_DIR + '/home_dir/home_dir_configs.txt', 'r')
home_dir_configs = []
for line in home_dir_configs_file:
home_dir_configs.append(line.strip())
for file_name in home_dir_configs:
call(
["rsync", "-r", DOTFILES_DIR + "/../" + file_name, DOTFILES_DIR + "/home_dir"])
if __name__ == '__main__':
main()
``` |
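For reference, a hypothetical `config/list_of_configs.txt` that the backup loop above expects: one program directory name per line (the entries below are examples, not from the repository).
```
i3
nvim
ranger
```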
{
"source": "Jimeng927/Deploy_a_dada_dashboard",
"score": 3
} |
#### File: Deploy_a_dada_dashboard/wrangling_scripts/wrangle_data.py
```python
import pandas as pd
import plotly.graph_objs as go
#Clean the dataset
df_br = pd.read_csv('data/birth_rate.csv', skiprows=4)
df_le = pd.read_csv('data/life_expectancy.csv', skiprows=4)
df_le = df_le.drop(['Country Code','Indicator Name','Indicator Code','2018','2019','Unnamed: 64'],axis=1)
df_meta = pd.read_csv('data/Metadata_Country_API_SP.DYN.LE00.IN_DS2_en_csv_v2_820880.csv')
df = df_br.merge(df_meta, on=['Country Code'], how='left')
df = df.drop(['Country Code','Indicator Name','Indicator Code','2018','2019','Unnamed: 64','SpecialNotes','TableName','Unnamed: 5'],axis=1)
countrylist = ['United States', 'China', 'Japan', 'Korea','Germany', 'United Kingdom', 'India', 'France', 'Brazil', 'Italy', 'Nigeria']
regionlist = ['East Asia & Pacific','Europe & Central Asia','Latin America & Caribbean','Middle East & North Africa','North America','South Asia','Sub-Saharan Africa']
incomelist= ['High income','Upper middle income','Lower middle income','Low income']
def return_figures():
"""Creates four plotly visualizations
Args:
None
Returns:
list (dict): list containing the four plotly visualizations
"""
# first chart plots birth rates from 1960 to 2017 in top selected countries
# as a line chart
graph_one = []
dfc = df[df['Country Name'].isin(countrylist)]
dfc = dfc.melt(id_vars='Country Name', value_vars = dfc.columns[1:-2])
dfc.columns = ['country','year', 'birth_rate']
dfc['year'] = dfc['year'].astype('datetime64[ns]').dt.year
for country in countrylist:
x_val = dfc[dfc['country'] == country].year.tolist()
y_val = dfc[dfc['country'] == country].birth_rate.tolist()
graph_one.append(
go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = country)
)
layout_one = dict(title = 'Birth Rates from Year 1960 to 2017',
xaxis = dict(title = 'Year', dtick=5),
yaxis = dict(title = 'Birth Rate (%)'),
)
# second chart plots birth rates by region as a line chart
graph_two = []
dfr = df.groupby('Region').mean().reset_index()
dfr = dfr.melt(id_vars='Region', value_vars = dfr.columns[1:])
dfr.columns = ['region','year', 'birth_rate']
dfr['year'] = dfr['year'].astype('datetime64[ns]').dt.year
for region in regionlist:
x_val = dfr[dfr['region'] == region].year.tolist()
y_val = dfr[dfr['region'] == region].birth_rate.tolist()
graph_two.append(
go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = region)
)
layout_two = dict(title = 'Birth Rates by Region',
xaxis = dict(title = 'Year', dtick=5),
yaxis = dict(title = 'Birth Rate (%)'),
)
# third chart plots birth rates by income group as a line chart
graph_three = []
dfi = df.groupby('IncomeGroup').mean().reset_index()
dfi = dfi.melt(id_vars='IncomeGroup', value_vars = dfi.columns[1:])
dfi.columns = ['income','year', 'birth_rate']
dfi['year'] = dfi['year'].astype('datetime64[ns]').dt.year
for income in incomelist:
x_val = dfi[dfi['income'] == income].year.tolist()
y_val = dfi[dfi['income'] == income].birth_rate.tolist()
graph_three.append(
go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = income)
)
layout_three = dict(title = 'Birth Rates by Income Group',
xaxis = dict(title = 'Year', dtick=5),
yaxis = dict(title = 'Birth Rate (%)'),
)
# fourth chart plots life expectancy from 1960 to 2017 in the selected countries
graph_four = []
dfl = df_le[df_le['Country Name'].isin(countrylist)]
dfl = dfl.melt(id_vars='Country Name', value_vars = dfl.columns[1:])
dfl.columns = ['country','year', 'life_expectancy']
dfl['year'] = dfl['year'].astype('datetime64[ns]').dt.year
for country in countrylist:
x_val = dfl[dfl['country'] == country].year.tolist()
y_val = dfl[dfl['country'] == country].life_expectancy.tolist()
graph_four.append(
go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = country)
)
layout_four = dict(title = 'Life Expectancy from Year 1960 to 2017',
xaxis = dict(title = 'Year', dtick=5),
yaxis = dict(title = 'Life Expectancy (years)'),
)
# append all charts to the figures list
figures = []
figures.append(dict(data=graph_one, layout=layout_one))
figures.append(dict(data=graph_two, layout=layout_two))
figures.append(dict(data=graph_three, layout=layout_three))
figures.append(dict(data=graph_four, layout=layout_four))
return figures
``` |
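A hypothetical way to render the figures returned above outside the Flask app, using plotly's offline `plot`; the output file names are placeholders.
```python
import plotly.graph_objs as go
import plotly.offline as pyo

from wrangling_scripts.wrangle_data import return_figures

for i, fig in enumerate(return_figures()):
    figure = go.Figure(data=fig['data'], layout=fig['layout'])
    pyo.plot(figure, filename='figure_{}.html'.format(i), auto_open=False)
```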
{
"source": "jimenofonseca/BSTS-SG",
"score": 3
} |
#### File: jimenofonseca/BSTS-SG/auxiliary.py
```python
import calendar
import datetime
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
from causalimpact import CausalImpact
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import TimeSeriesSplit
from config import INTERVENTION_CALENDAR
class BlockingTimeSeriesSplit():
def __init__(self, n_splits):
self.n_splits = n_splits
def get_n_splits(self, X, y, groups):
return self.n_splits
def split(self, X, y=None, groups=None):
n_samples = len(X)
k_fold_size = n_samples // self.n_splits
indices = np.arange(n_samples)
margin = 0
for i in range(self.n_splits):
start = i * k_fold_size
stop = start + k_fold_size
mid = int(0.8 * (stop - start)) + start
yield indices[start: mid], indices[mid + margin: stop]
class GrupedTimeseriesKFold():
def __init__(self, n_splits=5, groupby='smapee'):
self.n_splits = n_splits
self.groupby = groupby
def split(self, X, y=None, groups=None, ):
groups = X.groupby(self.groupby).groups
split_trains1 = []
split_tests1 = []
split_trains2 = []
split_tests2 = []
split_trains3 = []
split_tests3 = []
split_trains4 = []
split_tests4 = []
split_trains5 = []
split_tests5 = []
for group, indexes in groups.items():
tscv = TimeSeriesSplit(n_splits=5)
counter = 0
for train_index, test_index in tscv.split(indexes):
if counter == 0:
split_trains1.extend(indexes[train_index].values)
split_tests1.extend(indexes[test_index].values)
elif counter == 1:
split_trains2.extend(indexes[train_index].values)
split_tests2.extend(indexes[test_index].values)
elif counter == 2:
split_trains3.extend(indexes[train_index].values)
split_tests3.extend(indexes[test_index].values)
elif counter == 3:
split_trains4.extend(indexes[train_index].values)
split_tests4.extend(indexes[test_index].values)
elif counter == 4:
split_trains5.extend(indexes[train_index].values)
split_tests5.extend(indexes[test_index].values)
else:
print("ERROR")
counter += 1
cv = [(split_trains1, split_tests1),
(split_trains2, split_tests2),
(split_trains3, split_tests3),
(split_trains4, split_tests4),
(split_trains5, split_tests5)]
for rxm, tx in cv:
yield (np.array(rxm), np.array(tx))
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits
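# Illustrative use of BlockingTimeSeriesSplit (kept as comments, since this sits at
# module level); 'model', 'X' and 'y' would come from the calling script or notebook:
#
#   btscv = BlockingTimeSeriesSplit(n_splits=5)
#   scores = cross_val_score(model, X, y, cv=btscv, scoring='neg_mean_squared_error')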
def week_of_month(tgtdate):
days_this_month = calendar.mdays[tgtdate.month]
for i in range(1, days_this_month):
d = datetime.datetime(tgtdate.year, tgtdate.month, i)
if d.day - d.weekday() > 0:
startdate = d
break
# now we can use the modulo 7 approach
return (tgtdate - startdate).days // 7 + 1
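# Example (comments only): February 2016 starts on a Monday, so the anchor found
# above is 2016-02-01 and week_of_month(datetime.datetime(2016, 2, 29)) returns 5.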
def graph_check_gbm_timeseries(data_mean, X_names, y_limits):
visual = data_mean.set_index('timestamp')
intervention_list = visual['INTERVENTION'].unique()
# Set up the matplotlib figure
f, axes = plt.subplots(4, 3, figsize=(15, 9))
sns.despine(left=True)
fields_to_plot = ['CONSUMPTION_kWh'] + X_names
for i, intervention in enumerate(intervention_list):
plot = visual[visual['INTERVENTION'] == intervention]
if list(plot['INTERVENTION'].values[0])[-1] == 'L':
row = 0
column = int(list(plot['INTERVENTION'].values[0])[0]) - 1
else:
row = int(list(plot['INTERVENTION'].values[0])[-1])
column = int(list(plot['INTERVENTION'].values[0])[0]) - 1
if intervention == '2T4':
row = 1
column = 1
elif intervention == '2T5':
row = 2
column = 1
ax = axes[row, column]
ax.set_ylim(y_limits[0], y_limits[1])
plot[fields_to_plot].plot(ax=ax, title=intervention)
font = {'family': 'Arial',
'size': 10}
matplotlib.rc('font', **font)
def graph_check_gbm_dist(data_mean, X_names):
visual = data_mean.set_index('timestamp')
visual['observation'] = np.log1p(visual.CONSUMPTION_kWh)
intervention_list = visual['INTERVENTION'].unique()
# Set up the matplotlib figure
f, axes = plt.subplots(4, 3, figsize=(10, 9))
sns.despine(left=True)
for i, intervention in enumerate(intervention_list):
plot = visual[visual['INTERVENTION'] == intervention]
if list(plot['INTERVENTION'].values[0])[-1] == 'L':
row = 0
column = int(list(plot['INTERVENTION'].values[0])[0]) - 1
else:
row = int(list(plot['INTERVENTION'].values[0])[-1])
column = int(list(plot['INTERVENTION'].values[0])[0]) - 1
if intervention == '2T4':
row = 1
column = 1
elif intervention == '2T5':
row = 2
column = 1
ax = axes[row, column]
# plot observation
sns.distplot(plot.observation, hist=False, ax=ax, kde_kws={"label": intervention})
# plot prediction
for field in X_names:
value = np.log1p(plot[field])
legend = round(np.sqrt(mean_squared_error(value, plot.observation)), 4)
sns.distplot(value, ax=ax, hist=False, kde_kws={"label": field.split("_")[-1] + " " + str(legend)})
def prepare_data_synthetic_bsts(data, INTERVENTION, X_names):
# let's get the data for the first experiment of sensors in VIEW
data_selection = data[data['INTERVENTION'] == INTERVENTION]
dict_info = {'y': data_selection['CONSUMPTION_kWh']}
for i, field in enumerate(X_names):
dict_info["x" + str(i)] = data_selection[field]
dat_final = pd.DataFrame(dict_info)
return dat_final
def prepare_data_control_bsts(data, INTERVENTION, X_names):
# let's get the data for the first experiment of sensors in VIEW
df = data.copy()
data_selection = df[df['INTERVENTION'] == INTERVENTION]
data_control = df[df['INTERVENTION'] == list(INTERVENTION)[0] + 'CONTROL']
data_mean = data_selection.merge(data_control, left_index=True, right_index=True, suffixes=('', '_y'))
dict_info = {'y':data_mean['CONSUMPTION_kWh'],
'x1':data_mean['CONSUMPTION_kWh_y']}
data = pd.DataFrame(dict_info)
return data, data_mean.index
def graph_check_cumulative_bsts(data_mean, X_names):
visual = data_mean.set_index('timestamp')
intervention_list = visual['INTERVENTION'].unique()
# Set up the matplotlib figure
f, axes = plt.subplots(3, 3, figsize=(15, 9))
dataframe2 = pd.DataFrame()
for i, intervention in enumerate(intervention_list):
if list(intervention)[-1] == 'L':
x = 1 # do nothing
else:
experiment = int(list(intervention)[0])
intervention_data = INTERVENTION_CALENDAR[experiment]
pre_period = intervention_data[1]
post_period = intervention_data[2]
end_intervention_date = intervention_data[3]
# get position for the plot
row = int(list(intervention)[-1]) - 1
column = int(list(intervention)[0]) - 1
if intervention == '2T4':
row = 0
column = 1
elif intervention == '2T5':
row = 1
column = 1
ax = axes[row, column]
data = prepare_data_synthetic_bsts(data_mean.set_index('timestamp'), intervention, X_names)
ci = CausalImpact(data, pre_period, post_period, prior_level_sd=None, standarize=True)
ax = ci.plot(figsize=(5, 3), end_intervention_date=end_intervention_date, panels=['cumulative'],
add_axes=ax)
ax.set_title(intervention)
# get data
table = ci.summary_data
pi_value = ci.p_value
effect = str(round(table.loc['rel_effect', 'average'] * 100, 2)) + '\n' + '[' + str(
round(table.loc['rel_effect_lower', 'average'] * 100, 2)) + ',' + str(
round(table.loc['rel_effect_upper', 'average'] * 100, 2)) + ']'
table_df = pd.DataFrame({'id': [intervention], 'effect': [effect], 'p_value': [pi_value]})
dataframe2 = dataframe2.append(table_df, ignore_index=True)
print(dataframe2)
plt.show()
font = {'family': 'Arial',
'size': 10}
matplotlib.rc('font', **font)
def graph_check_all_bsts(data_mean, intervention, X_names, title):
experiment = int(list(intervention)[0])
intervention_data = INTERVENTION_CALENDAR[experiment]
pre_period = intervention_data[1]
post_period = intervention_data[2]
end_intervention_date = intervention_data[3]
data = prepare_data_synthetic_bsts(data_mean.set_index('timestamp'), intervention, X_names)
x = data.copy()
x = x.rename(columns={'y':'Observation', 'x0':'Bayes. Synth. Control Group'})
ci = CausalImpact(data, pre_period, post_period, prior_level_sd=None, standarize=True)
font = {'family': 'Arial',
'size': 18}
ci.plot(figsize=(7, 9), end_intervention_date=end_intervention_date, title=title)
matplotlib.rc('font', **font)
return ci, x
def graph_check_all_bsts_control(data_mean, intervention, X_names, title):
experiment = int(list(intervention)[0])
intervention_data = INTERVENTION_CALENDAR[experiment]
pre_period = intervention_data[1]
post_period = intervention_data[2]
end_intervention_date = intervention_data[3]
data, time = prepare_data_control_bsts(data_mean.set_index('timestamp'), intervention, X_names)
x = data.copy()
x = x.rename(columns={'y':'Observation', 'x1':'Random. Control Group'})
ci = CausalImpact(data, pre_period, post_period, prior_level_sd=None, standarize=True)
font = {'family': 'Arial',
'size': 18}
ci.plot(figsize=(7, 9), end_intervention_date=end_intervention_date, title=title)
matplotlib.rc('font', **font)
return ci, x
def graph_check_all_bsts_table(data_mean, intervention, X_names, n):
experiment = int(list(intervention)[0])
intervention_data = INTERVENTION_CALENDAR[experiment]
pre_period = intervention_data[1]
post_period = intervention_data[2]
data = prepare_data_synthetic_bsts(data_mean.set_index('timestamp'), intervention, X_names)
ci = CausalImpact(data, pre_period, post_period, prior_level_sd=None, standarize=True)
table = ci.summary_data
# Min_avg = ["-",
# round(table.loc['predicted_lower', 'average'],2),
# round(table.loc['abs_effect_lower','average'],2),
# round(table.loc['rel_effect_lower','average']*100,2)]
# M_avg = [round(table.loc['actual','average'],2),
# round(table.loc['predicted', 'average'],2),
# round(table.loc['abs_effect','average'],2),
# round(table.loc['rel_effect','average']*100,2)]
# SD_avg = ["-",
# round(table.loc['predicted_sd', 'average'],2),
# round(table.loc['abs_effect_sd','average'],2),
# round(table.loc['rel_effect_sd','average']*100,2)]
# Max_avg = ["-",
# round(table.loc['predicted_upper','average'],2),
# round(table.loc['abs_effect_upper','average'],2),
# round(table.loc['rel_effect_upper','average']*100,2)]
#
# Min_cum = ["-",
# round(table.loc['predicted_lower', 'cumulative'],2),
# round(table.loc['abs_effect_lower','cumulative'],2),
# round(table.loc['rel_effect_lower','cumulative']*100,2)]
# M_cum = [round(table.loc['actual','cumulative'],2),
# round(table.loc['predicted', 'cumulative'],2),
# round(table.loc['abs_effect','cumulative'],2),
# round(table.loc['rel_effect','cumulative']*100,2)]
# SD_cum = ["-",
# round(table.loc['predicted_sd', 'cumulative'],2),
# round(table.loc['abs_effect_sd','cumulative'],2),
# round(table.loc['rel_effect_sd','cumulative']*100,2)]
# Max_acum = ["-",
# round(table.loc['predicted_upper','cumulative'],2),
# round(table.loc['abs_effect_upper','cumulative'],2),
# round(table.loc['rel_effect_upper','cumulative']*100,2)]
#
# data = pd.DataFrame({"Treatment": ["Treatement "+intervention+ " (n="+str(n)+")", "","",""],
# "Data": ["Observation (kWh)",
# "Counterfactual (kWh)",
# "Absolute effect (kWh)",
# "Relative effect (%)"],
# "Min_avg":Min_avg,
# "M_avg":M_avg,
# "SD_avg":SD_avg,
# "Max_avg":Max_avg,
# "Min_cum": Min_cum,
# "M_cum": M_cum,
# "SD_cum": SD_cum,
# "Max_acum": Max_acum,
# })
data = pd.DataFrame({"Treatment": ["Treatement " + intervention + "(n=" + str(n) + ")"],
"Observation": str(round(table.loc['actual','average'],2))+" kWh",
"Counterfactual\n(s.d.)": put_together_two(round(table.loc['predicted', 'average'],2), round(table.loc['predicted_sd', 'average'],2)),
"Absolute effect\n[95% c.i.]": put_together_three(round(table.loc['abs_effect', 'average'],2), round(table.loc['abs_effect_lower', 'average'],2), round(table.loc['abs_effect_upper', 'average'],2)),
"Percentage Change\n[95% c.i.]": put_together_three_perc(round(table.loc['rel_effect', 'average']*100,2), round(table.loc['rel_effect_lower', 'average']*100,2), round(table.loc['rel_effect_upper', 'average']*100,2)),
"Observation ": str(round(table.loc['actual', 'cumulative'], 2))+" kWh",
"Counterfactual\n(s.d.) ": put_together_two(round(table.loc['predicted', 'cumulative'], 2),
round(table.loc['predicted_sd', 'cumulative'], 2)),
"Absolute effect\n[95% c.i.] ": put_together_three(round(table.loc['abs_effect', 'cumulative'], 2),
round(table.loc['abs_effect_lower', 'cumulative'],
2),
round(table.loc['abs_effect_upper', 'cumulative'],
2)),
"Percentage Change\n[95% c.i.] ": put_together_three_perc(
round(table.loc['rel_effect', 'cumulative'] * 100, 2),
round(table.loc['rel_effect_lower', 'cumulative'] * 100, 2),
round(table.loc['rel_effect_upper', 'cumulative'] * 100, 2)),
})
return data
def graph_point_effects(data_mean, intervention, X_names):
experiment = int(list(intervention)[0])
intervention_data = INTERVENTION_CALENDAR[experiment]
pre_period = intervention_data[1]
post_period = intervention_data[2]
data = prepare_data_synthetic_bsts(data_mean.set_index('timestamp'), intervention, X_names)
ci = CausalImpact(data, pre_period, post_period, prior_level_sd=None, standarize=True)
point_effects = ci.inferences['point_effects'].values
return point_effects
def put_together_two(a,b):
return str(a)+" kWh\n("+str(b)+")"
def put_together_three(a,b,c):
return str(a)+" kWh\n["+str(b)+","+str(c)+"]"
def put_together_three_perc(a,b,c):
return str(a)+"%\n["+str(b)+","+str(c)+"]"
```
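The plotting and table helpers above all hand a two-column frame (observed series `y` plus synthetic-control regressors) to `CausalImpact` together with a pre/post intervention window. The sketch below is an illustration only, not repository code; it assumes a pycausalimpact-style API and fabricates the data, but it shows the minimal input shape that `prepare_data_synthetic_bsts` produces and how the summary fields read above (`summary_data`, `p_value`) are accessed.
```python
# Illustrative sketch only (not repository code); assumes a pycausalimpact-style API.
import numpy as np
import pandas as pd
from causalimpact import CausalImpact

rng = np.random.default_rng(0)
x0 = 100 + np.cumsum(rng.normal(0, 1, 120))      # synthetic control series
y = 1.2 * x0 + rng.normal(0, 1, 120)
y[80:] += 5                                      # simulated intervention effect
data = pd.DataFrame({'y': y, 'x0': x0})

pre_period = [0, 79]                             # before the intervention
post_period = [80, 119]                          # after the intervention
ci = CausalImpact(data, pre_period, post_period)
print(ci.summary_data.loc['rel_effect', 'average'], ci.p_value)
```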
#### File: jimenofonseca/BSTS-SG/lgbm_imputer.py
```python
import os
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from custom_scorer_module import scorer_rmse
from sklearn.metrics import make_scorer
from auxiliary import GrupedTimeseriesKFold
from config import SEED
def imputer(df: pd.DataFrame,
timestamp_feature_name: str,
target_feature_name: str,
numerical_features_list: list,
categorical_features_list: list,
id_column: str,
window: int = 7,
get_best_parameters: bool = False,
params: dict = {'learning_rate': 0.01,'num_leaves': 16,'min_data_in_leaf': 1000,'num_iterations': 10000,'objective': 'rmse','metric': 'rmse'},
groupby:str='smapee'
):
"""
:param data:
:param target_column:
:param window:
:param id_column:
:return:
"""
data = df.copy() #because it gets changed and lgbm breaks
# rolling
data['rolling_back'] = data.groupby(by=id_column)[target_feature_name] \
.rolling(window=window, min_periods=1).mean().interpolate().values
# reversed rolling
data['rolling_forw'] = data.iloc[::-1].groupby(by=id_column)[target_feature_name] \
.rolling(window=window, min_periods=1).mean().interpolate().values
# rolling mean for same hour of the week
data['rolling_back_h'] = data.groupby(by=[id_column, 'dayofweek'])[target_feature_name] \
.rolling(window=3, min_periods=1).mean().interpolate().values
data['rolling_back_h_f'] = data.iloc[::-1].groupby(by=[id_column, 'dayofweek'])[target_feature_name] \
.rolling(window=3, min_periods=1).mean().interpolate().values
tr_idx, val_idx = ~data[target_feature_name].isnull(), data[target_feature_name].isnull()
numerical_features_list = numerical_features_list + ['rolling_back',
'rolling_forw',
'rolling_back_h',
'rolling_back_h_f']
features_list = numerical_features_list + categorical_features_list
if get_best_parameters:
# train in the log domain
data[target_feature_name] = np.log(1 + data[target_feature_name])
X = data.loc[tr_idx, features_list]
X = X.reset_index(drop=True)
y = data.loc[tr_idx, target_feature_name]
y = y.reset_index(drop=True)
grid_params = {'learning_rate': [0.001, 0.01, 0.1],
'num_leaves': [4, 16, 32, 64],
'max_depth': [-1],
'num_iterations': [10000],
'min_data_in_leaf': [20, 100, 200, 500],
'boosting': ['gbdt']}
mdl = lgb.LGBMRegressor(n_jobs=1,
metric='rmse',
objective='rmse',
seed=SEED)
grid = GridSearchCV(mdl,
grid_params,
verbose=1,
cv=GrupedTimeseriesKFold(groupby=groupby),
n_jobs=-1,
scoring=make_scorer(scorer_rmse, greater_is_better=False))
# Run the grid
grid.fit(X, y)
# Print the best parameters found
print(grid.best_params_)
print(grid.best_score_)
else:
evals_result = {} # to record eval results for plotting
# train in the log domain
data[target_feature_name] = np.log(1 + data[target_feature_name])
X = data.loc[tr_idx, features_list]
y = data.loc[tr_idx, target_feature_name]
lgb_train = lgb.Dataset(X, y, categorical_feature=categorical_features_list)
lgb_eval = lgb.Dataset(X, y, categorical_feature=categorical_features_list)
reg = lgb.train(params,
lgb_train,
valid_sets=(lgb_train, lgb_eval),
evals_result=evals_result,
early_stopping_rounds=5000,
verbose_eval=10000)
data[f'{target_feature_name}_imputed'] = np.nan
data.loc[val_idx, f'{target_feature_name}_imputed'] = reg.predict(data.loc[val_idx, features_list])
data.loc[val_idx, f'{target_feature_name}'] = data.loc[val_idx, f'{target_feature_name}_imputed'].values
# return to the real domain
data[target_feature_name] = np.exp(data[target_feature_name]) - 1
data[f'{target_feature_name}_imputed'] = np.exp(data[f'{target_feature_name}_imputed']) - 1
#also get some idea of how it looks for all the data
data[f'{target_feature_name}_all_imputed'] = np.exp(reg.predict(data[features_list]))-1
# check which features the imputer relied on the most
lgb.plot_importance(reg, title=f'feature importance for "{target_feature_name}"')
lgb.plot_metric(evals_result, metric='rmse')
return data
``` |
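A hypothetical call to the `imputer` above. The file name and the `temperature_C` column are placeholders; the only hard requirements taken from the code are a `dayofweek` column, an id column, and a target column containing NaNs to fill.
```python
# Hypothetical usage sketch; the file name and feature columns are assumptions.
import pandas as pd
from lgbm_imputer import imputer

readings = pd.read_csv('readings.csv', parse_dates=['timestamp'])   # assumed input file
readings['dayofweek'] = readings['timestamp'].dt.dayofweek          # needed by the rolling features
imputed = imputer(readings,
                  timestamp_feature_name='timestamp',
                  target_feature_name='CONSUMPTION_kWh',
                  numerical_features_list=['temperature_C'],
                  categorical_features_list=['dayofweek'],
                  id_column='smapee')
print(imputed['CONSUMPTION_kWh_imputed'].notna().sum(), 'values imputed')
```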
{
"source": "JIMENOFONSECA/DEG-USA",
"score": 2
} |
#### File: DEG-USA/model/1_prepare_data_and_inference.py
```python
import numpy as np
import pandas as pd
from enthalpygradients import EnthalpyGradient
import time
from model.auxiliary import read_weather_data_scenario
from model.constants import COP_cooling, COP_heating, RH_base_cooling_perc, RH_base_heating_perc, T_base_cooling_C, \
T_base_heating_C, ACH_Commercial, ACH_Residential
from pointers import METADATA_FILE_PATH, INTERMEDIATE_RESULT_FILE_PATH
def main():
# local variables
output_path = INTERMEDIATE_RESULT_FILE_PATH
scenarios_array = pd.read_excel(METADATA_FILE_PATH, sheet_name='SCENARIOS')['SCENARIO'].values
cities_array = pd.read_excel(METADATA_FILE_PATH, sheet_name='CITIES')['CITY'].values
floor_area_predictions_df = pd.read_excel(METADATA_FILE_PATH, sheet_name="FLOOR_AREA").set_index('year')
climate_region_array = pd.read_excel(METADATA_FILE_PATH, sheet_name='CITIES')['Climate Region'].values
floor_area_climate_df = pd.read_excel(METADATA_FILE_PATH, sheet_name="FLOOR_AREA_CLIMATE").set_index('Climate Region')
# calculate specific energy consumption per major city
specific_thermal_consumption_per_city_df = calc_specific_energy_per_major_city(cities_array,
climate_region_array,
floor_area_climate_df,
scenarios_array)
# calculate weighted average per scenario
data_weighted_average_df = calc_weighted_average_per_scenario(specific_thermal_consumption_per_city_df)
# calculate the energy consumption per scenario incorporating variance in built areas
data_final_df = calc_total_energy_consumption_per_scenario(data_weighted_average_df, floor_area_predictions_df,
scenarios_array)
#save the results to disk
data_final_df.to_csv(output_path, index=False)
print("done")
def calc_total_energy_consumption_per_scenario(data_weighted_average_df, floor_area_predictions_df, scenarios_array):
data_final_df = pd.DataFrame()
for scenario in scenarios_array:
data_scenario = data_weighted_average_df[data_weighted_average_df["SCENARIO"] == scenario]
year = data_scenario['YEAR'].values[0]
data_floor_area_scenario = floor_area_predictions_df.loc[float(year)]
for sector in ['Residential', 'Commercial']:
# calculate totals
total_heating_kWhm2yr = \
data_scenario[data_scenario["BUILDING_CLASS"] == sector]['TOTAL_HEATING_kWh_m2_yr'].values[0]
total_cooling_kWhm2yr = \
data_scenario[data_scenario["BUILDING_CLASS"] == sector]['TOTAL_COOLING_kWh_m2_yr'].values[0]
# add uncertainty in total built area
mean_m2 = data_floor_area_scenario['GFA_mean_' + sector + '_m2']
std_m2 = data_floor_area_scenario['GFA_sd_' + sector + '_m2']
GFA_m2 = np.random.normal(mean_m2, std_m2, 100)
total_heating_EJ = GFA_m2 * total_heating_kWhm2yr * 3.6E-12
total_cooling_EJ = GFA_m2 * total_cooling_kWhm2yr * 3.6E-12
# list of fields to extract
dict_data = pd.DataFrame({"SCENARIO": scenario,
"YEAR": year,
"BUILDING_CLASS": sector,
"GFA_Bm2": GFA_m2 /1E9,
"TOTAL_HEATING_kWh_m2_yr": total_heating_kWhm2yr,
"TOTAL_COOLING_kWh_m2_yr": total_cooling_kWhm2yr,
"TOTAL_HEATING_EJ": total_heating_EJ,
"TOTAL_COOLING_EJ": total_cooling_EJ})
data_final_df = pd.concat([data_final_df, dict_data], ignore_index=True)
return data_final_df
def calc_weighted_average_per_scenario(specific_thermal_consumption_per_city_df):
data_mean_per_scenario = specific_thermal_consumption_per_city_df.groupby(
["YEAR", "BUILDING_CLASS", "SCENARIO", "CLIMATE"],
as_index=False).agg('mean')
data_mean_per_scenario["TOTAL_HEATING_kWh_m2_yr"] = data_mean_per_scenario["TOTAL_HEATING_kWh_m2_yr"] * \
data_mean_per_scenario["WEIGHT"]
data_mean_per_scenario["TOTAL_COOLING_kWh_m2_yr"] = data_mean_per_scenario["TOTAL_COOLING_kWh_m2_yr"] * \
data_mean_per_scenario["WEIGHT"]
data_weighted_average = data_mean_per_scenario.groupby(["YEAR", "BUILDING_CLASS", "SCENARIO"], as_index=False).agg(
'sum')
return data_weighted_average
def calc_specific_energy_per_major_city(cities_array, climate_region_array, floor_area_climate_df, scenarios_array):
specific_thermal_consumption_per_city_df = pd.DataFrame()
for city, climate in zip(cities_array, climate_region_array):
floor_area_climate = floor_area_climate_df.loc[climate]
for scenario in scenarios_array:
# read weather data
T_outdoor_C, RH_outdoor_perc = read_weather_data_scenario(city, scenario)
# get the scenario year
year_scenario = scenario.split("_")[-1]
for sector, ACH in zip(['Residential', 'Commercial'], [ACH_Residential, ACH_Commercial]):
# calculate energy use intensities
# calculate specific energy consumption with daily enthalpy gradients model
eg = EnthalpyGradient(T_base_cooling_C, RH_base_cooling_perc)
sensible_cooling_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc, type='cooling',
ACH=ACH, COP=COP_cooling)
latent_cooling_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc,
type='dehumidification', ACH=ACH,
COP=COP_cooling)
eg = EnthalpyGradient(T_base_heating_C, RH_base_heating_perc)
sensible_heating_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc, type='heating',
ACH=ACH, COP=COP_heating)
latent_heating_kWhm2yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc,
type='humidification', ACH=ACH,
COP=COP_heating)
# calculate specific totals
total_heating_kWhm2yr = sensible_heating_kWhm2yr + latent_heating_kWhm2yr
total_cooling_kWhm2yr = sensible_cooling_kWhm2yr + latent_cooling_kWhm2yr
# list of fields to extract
dict_data = pd.DataFrame({"CITY": city,
"CLIMATE": climate,
"WEIGHT": floor_area_climate['GFA_mean_' + sector + '_perc'],
"SCENARIO": scenario,
"YEAR": year_scenario,
"BUILDING_CLASS": sector,
"TOTAL_HEATING_kWh_m2_yr": total_heating_kWhm2yr,
"TOTAL_COOLING_kWh_m2_yr": total_cooling_kWhm2yr}, index=[0])
specific_thermal_consumption_per_city_df = pd.concat(
[specific_thermal_consumption_per_city_df, dict_data], ignore_index=True)
print("city {} done".format(city))
return specific_thermal_consumption_per_city_df
if __name__ == "__main__":
t0 = time.time()
main()
t1 = round((time.time() - t0)/60,2)
print("finished after {} minutes".format(t1))
``` |
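The core of the model above is the `EnthalpyGradient` call pattern. The toy sketch below reuses exactly that API with fabricated hourly weather; the base temperature/humidity and the ACH/COP values are illustrative, not the project's constants.
```python
# Toy sketch reusing the EnthalpyGradient API as called above; all numbers are illustrative.
import numpy as np
from enthalpygradients import EnthalpyGradient

hours = 8760
T_outdoor_C = 10 + 15 * np.sin(np.linspace(0, 2 * np.pi, hours))   # fabricated climate
RH_outdoor_perc = np.full(hours, 60.0)

eg = EnthalpyGradient(18.5, 50)   # base temperature [C] and relative humidity [%], illustrative
cooling_kWh_m2_yr = eg.specific_thermal_consumption(T_outdoor_C, RH_outdoor_perc,
                                                    type='cooling', ACH=0.6, COP=3.0)
print('specific cooling demand:', cooling_kWh_m2_yr, 'kWh/m2/yr (toy input)')
```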
{
"source": "JIMENOFONSECA/FonnachtsBankApp",
"score": 2
} |
#### File: JIMENOFONSECA/FonnachtsBankApp/colors.py
```python
import random
# GET COLORS OF CEA
COLORS_TO_RGB = {"red": "rgb(240,75,91)",
"red_light": "rgb(246,148,143)",
"red_lighter": "rgb(252,217,210)",
"blue": "rgb(63,192,194)",
"blue_light": "rgb(171,221,222)",
"blue_lighter": "rgb(225,242,242)",
"yellow": "rgb(255,209,29)",
"yellow_light": "rgb(255,225,133)",
"yellow_lighter": "rgb(255,243,211)",
"brown": "rgb(174,148,72)",
"brown_light": "rgb(201,183,135)",
"brown_lighter": "rgb(233,225,207)",
"purple": "rgb(171,95,127)",
"purple_light": "rgb(198,149,167)",
"purple_lighter": "rgb(231,214,219)",
"green": "rgb(126,199,143)",
"green_light": "rgb(178,219,183)",
"green_lighter": "rgb(227,241,228)",
"grey": "rgb(68,76,83)",
"grey_light": "rgb(126,127,132)",
"black": "rgb(35,31,32)",
"white": "rgb(255,255,255)",
"orange": "rgb(245,131,69)",
"orange_light": "rgb(248,159,109)",
"orange_lighter": "rgb(254,220,198)"}
COLOR_CATEGORY = {'Salary': "yellow",
'Percentage of Income Spent': "blue",
'Percentage of Income Saved': "blue_light",
'Expenses': "blue_light",
'Positive Interest': "yellow_light",
"Unknown Deposit": "grey_light",
"Investments": "yellow",
"Public transport": "green_light",
'Housing': "purple",
'Bills': "orange",
'Taxi': "green",
'Gym': "red_light",
'Supermarket': "brown_light",
'Health & Beauty': "brown",
'Trips': "red",
'Maricadas': "green",
'Restaurant': "purple_light",
"Unknown Withdrawal": "grey",
'Taxes': "orange_light",
"Rent Earnings": "red_light",
"JF-PUBLICA": "red",
"LF-PUBLICA": "red_light",
"APT501": "blue_lighter",
"LAX": "blue",
"NIDAU": "blue_light",
'CDT-212960': "green",
'CDT-211160': 'green_light',
'CDT-214370': 'green_lighter',
'120-045453-0': "orange",
'063-015834-8': "orange_light",
'92-785858-6': 'yellow',
'31-296964-2': 'yellow_light',
"REAL_ESTATE": "blue",
"BONDS": "green",
"RETIREMENT": "red",
"CASH": "yellow",
"SECURITIES": "orange",
"Initial investment": "red",
"Creditcard": "yellow",
"Interests earned to date": "yellow",
"Interest Earned": "yellow_light",
"O&M Real Estate": "black"}
def calculate_color(ACCOUNTS_CURRENCY):
dictionary = {cat: COLORS_TO_RGB[color] for cat, color in COLOR_CATEGORY.items()}
for account in ACCOUNTS_CURRENCY.keys():
if account not in dictionary.keys():
dictionary.update({account: random.choice(list(COLORS_TO_RGB.values()))})
return dictionary
if __name__ == '__main__':
x = 1
```
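A short usage sketch for `calculate_color`: known categories keep their fixed colour, anything else gets a random one. The account names below are made up.
```python
# Usage sketch; the account names are placeholders.
from colors import calculate_color

ACCOUNTS_CURRENCY = {'Salary': 'CHF', 'MY-NEW-ACCOUNT': 'CHF'}
palette = calculate_color(ACCOUNTS_CURRENCY)
print(palette['Salary'])           # fixed colour from COLOR_CATEGORY
print(palette['MY-NEW-ACCOUNT'])   # randomly assigned rgb string
```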
#### File: FonnachtsBankApp/statement_parser/preprocessing_manual.py
```python
from utils.support_functions import calculate_rate_exact_day, calculate_rate_exact_day_cop, \
calculate_rate_exact_day_cop_inversed
from decimal import Decimal
import os
import pandas as pd
from settings import CURRENCIES, calc_categories
from statement_parser.preproccessing import get_category
def conversion(x, to_currency):
year = x[0]
month = x[1]
value = x[2]
from_currency = x[3]
# in case it is the same currency
if from_currency == to_currency:
return Decimal(str(round(value, 4)))
# in case we have COP (not supported with daily rates)
if from_currency == 'COP': # we use an approximated way:
rate = calculate_rate_exact_day_cop(to_currency)
elif to_currency == 'COP': # we use an approximated way:
rate = calculate_rate_exact_day_cop_inversed(from_currency)
else:
rate = calculate_rate_exact_day(from_currency, month, year, to_currency)
if value == "":
return value
else:
#print(year,month,value, rate, from_currency, to_currency)
new_with = Decimal(str(round(rate, 4))) * Decimal(str(round(value, 4)))
return new_with
def MANUAL_parser(file_path, directories):
CATEGORIES, CATEGORIES_DEPOSIT, CATEGORIES_WITHDRAWAL = calc_categories(directories)
# process data
registry_accounts = [] # here we collect the name of the accounts we have read
registry_currency_per_account = [] # here we collect the currency of every account
registry_type_per_account = [] # here we collect the type of every account
registry = pd.DataFrame() # here we collect the parsed transactions
xls = pd.ExcelFile(file_path)
accounts = xls.sheet_names
print('Reading the next accounts %s from file %s' % (accounts, file_path))
registry_accounts.extend(accounts)
for account in accounts:
# read it
data_account = pd.read_excel(file_path, account)
# calculate fields
data_account["ACCOUNT"] = account # add name of account to the database
# get currency and add it to registry_currency_per_account list
currency_account = list(set(data_account["CURRENCY"].values)) # get currency of account
type_account = list(set(data_account["TYPE"].values)) # get type of account
if len(currency_account) > 1: # watch out, you are indicating two or more currencies for the same account. not possible
raise Exception(
"The account %s has two or more currencies %s, that is not supported" % (account, currency_account))
elif len(type_account) > 1: # watch out, you are indicating two or more types for the same account. not possible
raise Exception("The account %s has two or more types %s, that is not supported" % (account, type_account))
else:
registry_currency_per_account.append(currency_account[0])
registry_type_per_account.append(type_account[0])
# add account to data_frame of accounts:
registry = pd.concat([registry, data_account], ignore_index=True, sort=False)
# category assignment
withdrawal_boolean = [True if x > 0 and y <= 0 else False for x,y in zip(registry["DEBIT"].values, registry["CREDIT"].values)]
registry["CAT"], _, _, _ = get_category(registry["DESC_1"],
registry["DESC_1"],
registry["DESC_1"],
withdrawal_boolean,
CATEGORIES)
# get metadata
registry_metadata = pd.DataFrame({"ACCOUNT": registry_accounts,
"TYPE": registry_type_per_account,
"CURRENCY": registry_currency_per_account})
return registry, registry_metadata
```
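`conversion` goes through `str()` before building each `Decimal`, so the operands carry only the four rounded digits rather than the full binary-float expansion. A self-contained illustration of that pattern (the rate and value are arbitrary):
```python
# Self-contained illustration of the Decimal rounding pattern used in conversion().
from decimal import Decimal

rate, value = 0.9137, 123.456789
converted = Decimal(str(round(rate, 4))) * Decimal(str(round(value, 4)))
print(converted)                         # exact product of the two rounded operands
print(Decimal(rate) * Decimal(value))    # without str(): binary-float noise leaks into the result
```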
#### File: FonnachtsBankApp/statement_parser/preprocessing_post.py
```python
import os
import camelot
from PyPDF2 import PdfFileReader
import pandas as pd
from re import sub
from decimal import Decimal
import numpy as np
import datetime
from settings import calc_categories
PAGES_BEGINNING = 0 # pages to skip at the beginning of the statement
PAGES_END = 0 # pages to skip at the end of the statement
conversion_numeric_month = {"01": "Jan", "02": "Feb", "03": "Mar", "04": "Apr", "05": "May", "06": "Jun", "07": "Jul",
"08": "Aug", "09": "Sep", "10": "Oct", "11": "Nov", "12": "Dec"}
def Parser_function(filepath, CATEGORIES):
# get number of pages
pdf = PdfFileReader(open(filepath, 'rb'))
pages_pdf = pdf.getNumPages()
# get iterator and empty lists to fill in
range_of_pages = range(PAGES_BEGINNING, pages_pdf - PAGES_END)
date_str = []
withdrawal_Decimal = []
deposit_Decimal = []
description_str = []
description2_str = []
description3_str = []
withdrawal_boolean = []
index_where_total_is = []
real_month = [] # just because some extrats get money from past months.
real_year = []
balance_Decimal = []
# go over each page and extract data
for page in range_of_pages:
if index_where_total_is != []: # if total is in the end of the page
break
print("Page No. ", page + 1)
# read pdf into dataframe
# set where to check in the statement
if page + 1 == 1: # the first page is always different
region = ['20,450,700,50']
else:
region = ['20,750,700,50']
page_table_format = camelot.read_pdf(filepath, pages=str(page + 1), flavor='stream',
table_areas=region)
dataframe_table = page_table_format[0].df
# if "Total" appears in the first column, the table was read incorrectly and the region needs to change
index_where_total_is2 = dataframe_table.index[dataframe_table[0] == "Total"].tolist()
if index_where_total_is2 != []:
print("read 3 columns, 4 expected, correcting...")
page_table_format = camelot.read_pdf(filepath, pages=str(page + 1), flavor='stream',
table_areas=['20,750,700,50'])
dataframe_table = page_table_format[0].df
index_where_total_is2 = [1]
if index_where_total_is2 != []:
print("read 3 columns, 4 expected, correcting...")
page_table_format = camelot.read_pdf(filepath, pages=str(page + 1), flavor='stream',
table_areas=['20,610,700,200'])
dataframe_table = page_table_format[0].df
index_where_total_is2 = [1]
if len(dataframe_table.iloc[0]) != 6:
raise Exception("error, the table does not have the right number of columns, page", str(page + 1))
# indicate where the total is, to be used in the next loop
index_where_total_is = dataframe_table.index[dataframe_table[1] == "Total"].tolist()
# take out the last part
if index_where_total_is != []:
dataframe_table_raw = dataframe_table.drop(dataframe_table.index[index_where_total_is[0] + 1:])
else:
dataframe_table_raw = dataframe_table
# select the number of column with witdrawal
columns_number = dataframe_table.shape[1]
for column_index in range(columns_number):
if "Lastschrift" in dataframe_table.loc[:, column_index].values:
with_column_index = column_index
if "Gutschrift" in dataframe_table.loc[:, column_index].values:
depo_column_index = column_index
if "Valuta" in dataframe_table.loc[:, column_index].values:
date_column_index = column_index
if "Text" in dataframe_table.loc[:, column_index].values:
text_column_index = column_index
if "Saldo" in dataframe_table.loc[:, column_index].values:
balance_column_index = column_index
# indicate the total of withdrawals and deposits:
if index_where_total_is != []:
index = index_where_total_is[0]
if dataframe_table_raw.loc[index, with_column_index] == '':
withdrawal_total_decimal = 0.0
else:
withdrawal_total_decimal = Decimal(
sub(r'[^\d.]', '', dataframe_table_raw.loc[index, with_column_index]))
if dataframe_table_raw.loc[index, depo_column_index] == '':
deposit_total_decimal = 0.0
else:
deposit_total_decimal = Decimal(sub(r'[^\d.]', '', dataframe_table_raw.loc[index, depo_column_index]))
# start filling in data
len_dataframe = dataframe_table_raw.shape[0]
for row in range(len_dataframe):
date = dataframe_table_raw.loc[row, date_column_index]
text = dataframe_table_raw.loc[row, text_column_index]
if date != '' and date != "Valuta" and text.split()[0] != "ZINSABSCHLUSS":
length_date = len(date.split('.'))
if length_date > 1:
year_str = "20" + date.split('.')[2]
date_withyear = date.split('.')[0] + " " + date.split('.')[1] + " " + year_str
real_month.append(conversion_numeric_month[date.split('.')[1]])
real_year.append(year_str)
date_withyear_formatted = datetime.datetime.strptime(date_withyear, '%d %m %Y')
date_str.append(date_withyear_formatted)
# withdrawal and deposit
if dataframe_table_raw.loc[row, with_column_index] == '':
withdrawal_Decimal.append(Decimal(0.0))
withdrawal_boolean.append(False)
else:
x = dataframe_table_raw.loc[row, balance_column_index].replace(" ", "")
balance_Decimal.append(x)
withdrawal_Decimal.append(
Decimal(sub(r'[^\d.]', '', dataframe_table_raw.loc[row, with_column_index])))
withdrawal_boolean.append(True)
if dataframe_table_raw.loc[row, depo_column_index] == '':
deposit_Decimal.append(Decimal(0.0))
else:
x = dataframe_table_raw.loc[row, balance_column_index].replace(" ", "")
balance_Decimal.append(x)
deposit_Decimal.append(
Decimal(sub(r'[^\d.]', '', dataframe_table_raw.loc[row, depo_column_index])))
if dataframe_table_raw.loc[row, with_column_index] == '' and dataframe_table_raw.loc[
row, depo_column_index] == '':
balance_Decimal.append(Decimal(0.0))
# accumulate balance if the last value was ''
if balance_Decimal[-1] == '':
if len(balance_Decimal) > 1:
balance_Decimal[-1] = balance_Decimal[-2]
else:
balance_Decimal[-1] = dataframe_table_raw.loc[row - 1, balance_column_index].replace(" ",
"")
if balance_Decimal[-1] == '':
balance_Decimal[-1] = dataframe_table_raw.loc[
row - 2, balance_column_index].replace(" ", "")
# descriptions
description_str.append(dataframe_table_raw.loc[row, text_column_index])
if dataframe_table_raw.loc[row + 1, date_column_index] == '':
description2_str.append(dataframe_table_raw.loc[row + 1, text_column_index])
else:
description2_str.append('')
try:
if dataframe_table_raw.loc[row + 2, date_column_index] == '':
description3_str.append(dataframe_table_raw.loc[row + 2, text_column_index])
else:
description3_str.append('')
except:
description3_str.append('')
# category assignment
category_final2, category1_str, category2_str, category3_str = get_category(description2_str, description3_str,
description_str, withdrawal_boolean, CATEGORIES)
# sanity-check parsed totals against the statement totals
if sum(withdrawal_Decimal) != withdrawal_total_decimal:
print("Warning, the total of withdrawals does not match")
if sum(deposit_Decimal) != deposit_total_decimal:
print("Warning, the total of deposits does not match")
statement_df = pd.DataFrame({"DATE": date_str,
"DEBIT": withdrawal_Decimal,
"CREDIT": deposit_Decimal,
"BALANCE": balance_Decimal,
"YEAR": real_year,
"MONTH": real_month,
"DESC_1": description_str,
"DESC_2": description2_str,
"DESC_3": description3_str,
"CAT": category_final2,
"CAT1": category1_str,
"CAT2": category2_str,
"CAT3": category3_str})
return statement_df
def get_category(description2_str, description3_str, description_str,
withdrawal_boolean, CATEGORIES):
category_final = []
category_final2 = []
category1_str = np.vectorize(Categorization)(description_str, CATEGORIES)
category2_str = np.vectorize(Categorization)(description2_str, CATEGORIES)
category3_str = np.vectorize(Categorization)(description3_str, CATEGORIES)
# get confirmation from the three subcategories
describe = zip(category1_str, category2_str, category3_str)
for cat1, cat2, cat3 in describe:
if cat1 == "No Category" and cat2 == "No Category" and cat3 != "No Category":
category_final.append(cat3)
elif cat1 == cat2:
category_final.append(cat1)
elif cat2 != "No Category" and cat1 == "No Category":
category_final.append(cat2)
elif cat1 != "No Category":
category_final.append(cat1)
else:
category_final.append("No Category")
# resolve remaining unknowns using the withdrawal flag
for cat, width in zip(category_final, withdrawal_boolean):
if cat == "No Category" and width == True:
category_final2.append("Unknown Withdrawal")
elif cat == "No Category" and width == False:
category_final2.append("Unknown Deposit")
else:
category_final2.append(cat)
return category_final2, category1_str, category2_str, category3_str
def mysplit(s):
head = s.rstrip('0123456789')
tail = s[len(head):]
return head, tail
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def Categorization(description_str, CATEGORIES):
# categorize full phrases
cat_flag = True
for category_str, names_inside_category in CATEGORIES.items():
if description_str in names_inside_category:
cat = category_str
cat_flag = False
# categorize first word
if cat_flag:
for category_str, names_inside_category in CATEGORIES.items():
description_split = description_str.split(" ")[0]
if description_split in names_inside_category:
cat = category_str
cat_flag = False
# if the first word after the comma matches
if cat_flag:
for category_str, names_inside_category in CATEGORIES.items():
description_split = description_str.split(",")
lenght_words = len(description_split)
if lenght_words > 1:
if description_split[1].split()[0] in names_inside_category:
cat = category_str
cat_flag = False
# Check if there were payments in foreign currency (generally they are at the end)
if cat_flag:
for category_str, names_inside_category in CATEGORIES.items():
description_split = description_str.split()
lenght_words = len(description_split)
if lenght_words == 2 and hasNumbers(description_split[1]):
description_split = description_split[-1][:3]
if description_split in names_inside_category:
cat = category_str
cat_flag = False
# Check if there were payments in foreign currency (some are at the beginning)
if cat_flag:
for category_str, names_inside_category in CATEGORIES.items():
description_split = description_str.split()
lenght_words = len(description_split)
if lenght_words == 1 and hasNumbers(description_split[0]):
description_split = description_split[-1][:3]
if description_split in names_inside_category:
cat = category_str
cat_flag = False
# last resource, if the word is anywhere
if cat_flag:
for category_str, names_inside_category in CATEGORIES.items():
description_split = description_str.split(" ")
lenght_words = len(description_split)
if lenght_words > 1:
for word in description_split:
if word in names_inside_category:
cat = category_str
cat_flag = False
if cat_flag:
cat = "No Category"
return cat
def Metadata(filepath):
page_table_format = camelot.read_pdf(filepath, pages=str(1), flavor='stream',
table_areas=['20,600,700,50'])
dataframe_table_raw = page_table_format[0].df
# locate where the name of the account is
index_where_date = dataframe_table_raw.index[dataframe_table_raw[0] == "Privatkonto"].tolist()
if index_where_date == []:
index_where_date = dataframe_table_raw.index[dataframe_table_raw[0] == "E-Sparkonto"].tolist()
if index_where_date == []:
index_where_date = dataframe_table_raw.index[dataframe_table_raw[0] == "E- Sparkonto"].tolist()
index_where_date = index_where_date[0] + 1
date = dataframe_table_raw.loc[index_where_date, 0].split('-')[1]
month_str = conversion_numeric_month[date.split('.')[1]]
year_str = date.split('.')[-1]
date_statement_str = date.split('.')[0] + " " + month_str + " " + year_str
if dataframe_table_raw.shape[1] == 5:
if "Kontonummer" in dataframe_table_raw.loc[index_where_date + 1, 2]:
account_str = dataframe_table_raw.loc[index_where_date + 1, 2].split(" ", 1)[1]
else:
account_str = dataframe_table_raw.loc[index_where_date + 2, 2].split(" ", 1)[1]
elif dataframe_table_raw.shape[1] == 6:
if "Kontonummer" in dataframe_table_raw.loc[index_where_date + 1, 3]:
account_str = dataframe_table_raw.loc[index_where_date + 1, 3].split(" ", 1)[1]
else:
account_str = dataframe_table_raw.loc[index_where_date + 2, 3].split(" ", 1)[1]
elif dataframe_table_raw.shape[1] == 7:
if "Kontonummer" in dataframe_table_raw.loc[index_where_date + 1, 4]:
account_str = dataframe_table_raw.loc[index_where_date + 1, 4].split(" ", 1)[1]
else:
account_str = dataframe_table_raw.loc[index_where_date + 2, 4].split(" ", 1)[1]
elif dataframe_table_raw.shape[1] == 3:
if "Kontonummer" in dataframe_table_raw.loc[index_where_date + 2, 1]:
account_str = dataframe_table_raw.loc[index_where_date + 2, 1].split(" ", 1)[1]
else:
account_str = dataframe_table_raw.loc[index_where_date + 2, 1].split(" ", 1)[1]
else:
print("error the frame shape was {}, valid are 5,6,7,3".format(dataframe_table_raw.shape[1]))
return date_statement_str, month_str, year_str, account_str.replace(" ", "")
def getListOfFiles(dirName):
# create a list of file and sub directories
# names in the given directory
listOfFile = os.listdir(dirName)
allFiles = list()
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
elif fullPath.endswith(".pdf"):
allFiles.append(fullPath)
return allFiles
# Read pdf into DataFrame
def POSTFINANCE_parser(DIRECTORY, directories):
registry = pd.DataFrame()
CATEGORIES, CATEGORIES_DEPOSIT, CATEGORIES_WITHDRAWAL = calc_categories(directories)
# process data
# get list folders which indicate the years:
all_files_paths = getListOfFiles(DIRECTORY)
registry_accounts = []
for filepath in all_files_paths:
# extract account number, balance and month of statement form the first page
print("Working on file {}".format(filepath.split("/")[-1]))
date_statement_str, month_str, year_str, account_str = Metadata(filepath)
registry_accounts.append(account_str)
print("Working on statement Postfinance {} of the account No. {}".format(date_statement_str, account_str))
# Iterate over the pages of the statement
statement_df= Parser_function(filepath, CATEGORIES)
statement_df["ACCOUNT"] = account_str
statement_df["STATEMENT"] = date_statement_str
statement_df["TYPE"] = "CASH"
statement_df["CURRENCY"] = "CHF"
registry = pd.concat([registry, statement_df], ignore_index=True)
registry_accounts = list(set(registry_accounts))
registry_metadata = pd.DataFrame({"ACCOUNT": registry_accounts,
"TYPE": ["CASH"]*len(registry_accounts),
"CURRENCY": ["CHF"]*len(registry_accounts)})
return registry, registry_metadata
``` |
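A toy illustration of the cascading matcher in `Categorization`: exact phrase, first word, first word after a comma, currency codes, then any word. The category dictionary and the descriptions below are invented (in the project the dictionary comes from `calc_categories`), and the import path is assumed.
```python
# Toy illustration; categories and descriptions are invented, import path is assumed.
from statement_parser.preprocessing_post import Categorization

CATEGORIES = {
    'Supermarket': ['MIGROS', 'COOP'],
    'Public transport': ['SBB'],
}
print(Categorization('MIGROS ZUERICH HB', CATEGORIES))           # first-word match -> Supermarket
print(Categorization('EINKAUF, SBB AUTOMAT 1234', CATEGORIES))   # match after the comma -> Public transport
print(Categorization('UNKNOWN VENDOR', CATEGORIES))              # no match -> No Category
```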
{
"source": "JIMENOFONSECA/HBLM-USA",
"score": 2
} |
#### File: HBLM-USA/model/4_prediction.py
```python
import time
import pandas as pd
from model.auxiliary import parse_scenario_name, percentile
from model.constants import MODEL_NAME
from pointers import METADATA_FILE_PATH, INTERMEDIATE_RESULT_FILE_PATH, FINAL_RESULT_FILE_PATH
def main():
# local variables
intermediate_result = INTERMEDIATE_RESULT_FILE_PATH
output_path = FINAL_RESULT_FILE_PATH
scenarios_array = pd.read_excel(METADATA_FILE_PATH, sheet_name='SCENARIOS')['SCENARIO'].values
# group per scenario and building class and calculate mean and variance
data_consumption = pd.read_csv(intermediate_result)
data_consumption = data_consumption.groupby(["BUILDING_CLASS", "SCENARIO"],
as_index=False).agg([percentile(50), percentile(2.5), percentile(97.5)])
final_df = pd.DataFrame()
for scenario in scenarios_array:
ipcc_scenario_name = parse_scenario_name(scenario)
year_scenario = scenario.split("_")[-1]
for sector in ['Residential', 'Commercial']:
use = "GFA_Bm2"
mean_area = data_consumption.loc[sector, scenario][use, 'percentile_50']
est_97_5 = data_consumption.loc[sector, scenario][use, 'percentile_97.5']
est_2_5 = data_consumption.loc[sector, scenario][use, 'percentile_2.5']
dict_area_mean = {'Model': MODEL_NAME,
'Region': 'USA',
'Unit': 'bn m2/yr',
'Variable': 'Energy Service|Buildings|' + sector + '|Floor Space',
'Scenario': ipcc_scenario_name + ' - 50th percentile',
'Year': year_scenario,
'Value': mean_area,
}
dict_area_min = {'Model': MODEL_NAME,
'Region': 'USA',
'Unit': 'bn m2/yr',
'Variable': 'Energy Service|Buildings|' + sector + '|Floor Space',
'Scenario': ipcc_scenario_name + ' - 2.5th percentile',
'Year': year_scenario,
'Value': est_2_5,
}
dict_area_max = {'Model': MODEL_NAME,
'Region': 'USA',
'Unit': 'bn m2/yr',
'Variable': 'Energy Service|Buildings|' + sector + '|Floor Space',
'Scenario': ipcc_scenario_name + ' - 97.5th percentile',
'Year': year_scenario,
'Value': est_97_5,
}
use = "TOTAL_CONSUMPTION_EJ"
mean_EJ = data_consumption.loc[sector, scenario][use, 'percentile_50']
est_97_5_EJ = data_consumption.loc[sector, scenario][use, 'percentile_97.5']
est_2_5_EJ = data_consumption.loc[sector, scenario][use, 'percentile_2.5']
dict_mean = {'Model': MODEL_NAME,
'Region': 'USA',
'Unit': 'EJ/yr',
'Variable': 'Final Energy|Buildings|' + sector,
'Scenario': ipcc_scenario_name + ' - 50th percentile',
'Year': year_scenario,
'Value': mean_EJ,
}
dict_min = {'Model': MODEL_NAME,
'Region': 'USA',
'Unit': 'EJ/yr',
'Variable': 'Final Energy|Buildings|' + sector,
'Scenario': ipcc_scenario_name + ' - 2.5th percentile',
'Year': year_scenario,
'Value': est_2_5_EJ,
}
dict_max = {'Model': MODEL_NAME,
'Region': 'USA',
'Unit': 'EJ/yr',
'Variable': 'Final Energy|Buildings|' + sector,
'Scenario': ipcc_scenario_name + ' - 97.5th percentile',
'Year': year_scenario,
'Value': est_97_5_EJ,
}
dataframe = pd.DataFrame([dict_mean, dict_min, dict_max, dict_area_mean, dict_area_min, dict_area_max])
final_df = pd.concat([final_df, dataframe], ignore_index=True)
result = pd.pivot_table(final_df, values='Value', columns='Year',
index=['Model', 'Scenario', 'Region', 'Variable', 'Unit'])
result.to_csv(output_path)
if __name__ == "__main__":
t0 = time.time()
main()
t1 = round((time.time() - t0) / 60, 2)
print("finished after {} minutes".format(t1))
``` |
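`main` above aggregates with `percentile(50)`, `percentile(2.5)` and `percentile(97.5)` imported from `model.auxiliary`, which is not shown here. The sketch below is one common way such a factory is written (an assumption, not the repository's implementation); the named functions produce the `percentile_50` / `percentile_2.5` / `percentile_97.5` columns that the code reads.
```python
# Sketch of a percentile() aggregation factory; this is an assumed implementation,
# the real one lives in model/auxiliary.py and is not shown in this file.
import numpy as np
import pandas as pd

def percentile(q):
    def percentile_(series):
        return np.percentile(series, q)
    percentile_.__name__ = 'percentile_%s' % q   # becomes the aggregated column name
    return percentile_

df = pd.DataFrame({'BUILDING_CLASS': ['Residential'] * 4,      # fabricated rows
                   'SCENARIO': ['A1_2050'] * 4,
                   'TOTAL_CONSUMPTION_EJ': [4.0, 4.2, 3.9, 4.1]})
print(df.groupby(['BUILDING_CLASS', 'SCENARIO'])
        .agg([percentile(50), percentile(2.5), percentile(97.5)]))
```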
{
"source": "jimevans/buck",
"score": 2
} |
#### File: buck/programs/buck_repo.py
```python
from __future__ import print_function
import os
import platform
import subprocess
import sys
import tempfile
import textwrap
from timing import monotonic_time_nanos
from tracing import Tracing
from buck_tool import BuckTool, which, check_output, JAVA_MAX_HEAP_SIZE_MB
from buck_tool import BuckToolException, RestartBuck
import buck_version
JAVA_CLASSPATHS = [
"build/abi_processor/classes",
"build/classes",
"build/dx_classes",
"src",
"third-party/java/android/sdklib.jar",
"third-party/java/aopalliance/aopalliance.jar",
"third-party/java/args4j/args4j-2.0.30.jar",
"third-party/java/asm/asm-debug-all-5.0.3.jar",
"third-party/java/closure-templates/soy-excluding-deps.jar",
"third-party/java/commons-compress/commons-compress-1.8.1.jar",
"third-party/java/dd-plist/dd-plist.jar",
"third-party/java/ddmlib/ddmlib-22.5.3.jar",
"third-party/java/eclipse/org.eclipse.core.contenttype_3.4.200.v20140207-1251.jar",
"third-party/java/eclipse/org.eclipse.core.jobs_3.6.1.v20141014-1248.jar",
"third-party/java/eclipse/org.eclipse.core.resources_3.9.1.v20140825-1431.jar",
"third-party/java/eclipse/org.eclipse.core.runtime_3.10.0.v20140318-2214.jar",
"third-party/java/eclipse/org.eclipse.equinox.common_3.6.200.v20130402-1505.jar",
"third-party/java/eclipse/org.eclipse.equinox.preferences_3.5.200.v20140224-1527.jar",
"third-party/java/eclipse/org.eclipse.jdt.core.prefs",
"third-party/java/eclipse/org.eclipse.jdt.core_3.10.2.v20150120-1634.jar",
"third-party/java/eclipse/org.eclipse.osgi_3.10.2.v20150203-1939.jar",
"third-party/java/gson/gson-2.2.4.jar",
"third-party/java/guava/guava-18.0.jar",
"third-party/java/guice/guice-3.0.jar",
"third-party/java/guice/guice-assistedinject-3.0.jar",
"third-party/java/guice/guice-multibindings-3.0.jar",
"third-party/java/icu4j/icu4j-54.1.1.jar",
"third-party/java/infer-annotations/infer-annotations-1.5.jar",
"third-party/java/ini4j/ini4j-0.5.2.jar",
"third-party/java/jackson/jackson-annotations-2.0.5.jar",
"third-party/java/jackson/jackson-core-2.0.5.jar",
"third-party/java/jackson/jackson-databind-2.0.5.jar",
"third-party/java/jackson/jackson-datatype-jdk7-2.5.0.jar",
"third-party/java/jetty/jetty-all-9.2.10.v20150310.jar",
"third-party/java/jsr/javax.inject-1.jar",
"third-party/java/jsr/jsr305.jar",
"third-party/java/nailgun/nailgun-server-0.9.2-SNAPSHOT.jar",
"third-party/java/okhttp/okhttp-2.2.0.jar",
"third-party/java/okio/okio-1.2.0.jar",
"third-party/java/servlet-api/javax.servlet-api-3.1.0.jar",
"third-party/java/slf4j/slf4j-api-1.7.2.jar",
"third-party/java/stringtemplate/ST-4.0.8.jar",
"third-party/java/xz-java-1.3/xz-1.3.jar",
]
RESOURCES = {
"abi_processor_classes": "build/abi_processor/classes",
"android_agent_path": "assets/android/agent.apk",
"buck_client": "build/ng",
"buck_server": "bin/buck",
"dx": "third-party/java/dx/etc/dx",
"jacoco_agent_jar": "third-party/java/jacoco/jacocoagent.jar",
"logging_config_file": "config/logging.properties",
"native_exopackage_fake_path": "assets/android/native-exopackage-fakes.apk",
"path_to_asm_jar": "third-party/java/asm/asm-debug-all-5.0.3.jar",
"path_to_buck_py": "src/com/facebook/buck/parser/buck.py",
"path_to_compile_asset_catalogs_build_phase_sh": (
"src/com/facebook/buck/apple/compile_asset_catalogs_build_phase.sh"),
"path_to_compile_asset_catalogs_py": (
"src/com/facebook/buck/apple/compile_asset_catalogs.py"),
"path_to_intellij_py": "src/com/facebook/buck/command/intellij.py",
"path_to_pathlib_py": "third-party/py/pathlib/pathlib.py",
"path_to_pex": "src/com/facebook/buck/python/pex.py",
"path_to_python_test_main": "src/com/facebook/buck/python/__test_main__.py",
"path_to_sh_binary_template": "src/com/facebook/buck/shell/sh_binary_template",
"path_to_static_content": "webserver/static",
"quickstart_origin_dir": "src/com/facebook/buck/cli/quickstart/android",
"report_generator_jar": "build/report-generator.jar",
"testrunner_classes": "build/testrunner/classes",
}
def get_ant_env(max_heap_size_mb):
ant_env = os.environ.copy()
ant_opts = ant_env.get('ANT_OPTS', '')
if ant_opts.find('-Xmx') == -1:
# Adjust the max heap size if it's not already specified.
ant_max_heap_arg = '-Xmx{0}m'.format(max_heap_size_mb)
if ant_opts:
ant_opts += ' '
ant_opts += ant_max_heap_arg
ant_env['ANT_OPTS'] = ant_opts
return ant_env
class BuckRepo(BuckTool):
def __init__(self, buck_bin_dir, buck_project):
super(BuckRepo, self).__init__(buck_project)
self._buck_dir = self._platform_path(os.path.dirname(buck_bin_dir))
self._build_success_file = os.path.join(
self._buck_dir, "build", "successful-build")
dot_git = os.path.join(self._buck_dir, '.git')
self._is_git = os.path.exists(dot_git) and os.path.isdir(dot_git) and which('git') and \
sys.platform != 'cygwin'
self._is_buck_repo_dirty_override = os.environ.get('BUCK_REPOSITORY_DIRTY')
buck_version = buck_project.buck_version
if self._is_git and not buck_project.has_no_buck_check and buck_version:
revision = buck_version[0]
branch = buck_version[1] if len(buck_version) > 1 else None
self._checkout_and_clean(revision, branch)
self._build()
def _checkout_and_clean(self, revision, branch):
with Tracing('BuckRepo._checkout_and_clean'):
if not self._revision_exists(revision):
print(textwrap.dedent("""\
Required revision {0} is not
available in the local repository.
Buck is fetching updates from git. You can disable this by creating
a '.nobuckcheck' file in your repository, but this might lead to
strange bugs or build failures.""".format(revision)),
file=sys.stderr)
git_command = ['git', 'fetch']
git_command.extend(['--all'] if not branch else ['origin', branch])
try:
subprocess.check_call(
git_command,
stdout=sys.stderr,
cwd=self._buck_dir)
except subprocess.CalledProcessError:
raise BuckToolException(textwrap.dedent("""\
Failed to fetch Buck updates from git."""))
current_revision = self._get_git_revision()
if current_revision != revision:
print(textwrap.dedent("""\
Buck is at {0}, but should be {1}.
Buck is updating itself. To disable this, add a '.nobuckcheck'
file to your project root. In general, you should only disable
this if you are developing Buck.""".format(
current_revision, revision)),
file=sys.stderr)
try:
subprocess.check_call(
['git', 'checkout', '--quiet', revision],
cwd=self._buck_dir)
except subprocess.CalledProcessError:
raise BuckToolException(textwrap.dedent("""\
Failed to update Buck to revision {0}.""".format(revision)))
if os.path.exists(self._build_success_file):
os.remove(self._build_success_file)
ant = self._check_for_ant()
self._run_ant_clean(ant)
raise RestartBuck()
def _join_buck_dir(self, relative_path):
return os.path.join(self._buck_dir, *(relative_path.split('/')))
def _is_dirty(self):
if self._is_buck_repo_dirty_override:
return self._is_buck_repo_dirty_override == "1"
if not self._is_git:
return False
output = check_output(
['git', 'status', '--porcelain'],
cwd=self._buck_dir)
return bool(output.strip())
def _has_local_changes(self):
if not self._is_git:
return False
output = check_output(
['git', 'ls-files', '-m'],
cwd=self._buck_dir)
return bool(output.strip())
def _get_git_revision(self):
if not self._is_git:
return 'N/A'
return buck_version.get_git_revision(self._buck_dir)
def _get_git_commit_timestamp(self):
if self._is_buck_repo_dirty_override or not self._is_git:
return -1
return buck_version.get_git_revision_timestamp(self._buck_dir)
def _revision_exists(self, revision):
returncode = subprocess.call(
['git', 'cat-file', '-e', revision],
cwd=self._buck_dir)
return returncode == 0
def _check_for_ant(self):
ant = which('ant')
if not ant:
message = "You do not have ant on your $PATH. Cannot build Buck."
if sys.platform == "darwin":
message += "\nTry running 'brew install ant'."
raise BuckToolException(message)
return ant
def _print_ant_failure_and_exit(self, ant_log_path):
print(textwrap.dedent("""\
::: 'ant' failed in the buck repo at '{0}',
::: and 'buck' is not properly built. It will be unusable
::: until the error is corrected. You can check the logs
::: at {1} to figure out what broke.""".format(
self._buck_dir, ant_log_path)), file=sys.stderr)
if self._is_git:
raise BuckToolException(textwrap.dedent("""\
::: It is possible that running this command will fix it:
::: git -C "{0}" clean -xfd""".format(self._buck_dir)))
else:
raise BuckToolException(textwrap.dedent("""\
::: It is possible that running this command will fix it:
::: rm -rf "{0}"/build""".format(self._buck_dir)))
def _run_ant_clean(self, ant):
clean_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant-clean.log')
with open(clean_log_path, 'w') as clean_log:
exitcode = subprocess.call([ant, 'clean'], stdout=clean_log,
cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
if exitcode != 0:
self._print_ant_failure_and_exit(clean_log_path)
def _run_ant(self, ant):
ant_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant.log')
with open(ant_log_path, 'w') as ant_log:
exitcode = subprocess.call([ant], stdout=ant_log,
cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
if exitcode != 0:
self._print_ant_failure_and_exit(ant_log_path)
def _build(self):
with Tracing('BuckRepo._build'):
if not os.path.exists(self._build_success_file):
print(
"Buck does not appear to have been built -- building Buck!",
file=sys.stderr)
ant = self._check_for_ant()
self._run_ant_clean(ant)
self._run_ant(ant)
open(self._build_success_file, 'w').close()
print("All done, continuing with build.", file=sys.stderr)
def _has_resource(self, resource):
return True
def _get_resource(self, resource, exe=False):
return self._join_buck_dir(RESOURCES[resource.name])
def _get_buck_version_uid(self):
with Tracing('BuckRepo._get_buck_version_uid'):
# First try to get the "clean" buck version. If it succeeds,
# return it.
clean_version = buck_version.get_clean_buck_version(
self._buck_dir,
allow_dirty=self._is_buck_repo_dirty_override == "1")
if clean_version is not None:
return clean_version
# Otherwise, if there is a .nobuckcheck file, or if there isn't
# a .buckversion file, fall back to a "dirty" version.
if (self._buck_project.has_no_buck_check or
not self._buck_project.buck_version):
return buck_version.get_dirty_buck_version(self._buck_dir)
if self._has_local_changes():
print(textwrap.dedent("""\
::: Your buck directory has local modifications, and therefore
::: builds will not be able to use a distributed cache.
::: The following files must be either reverted or committed:"""),
file=sys.stderr)
subprocess.call(
['git', 'ls-files', '-m'],
stdout=sys.stderr,
cwd=self._buck_dir)
elif os.environ.get('BUCK_CLEAN_REPO_IF_DIRTY') != 'NO':
print(textwrap.dedent("""\
::: Your local buck directory is dirty, and therefore builds will
::: not be able to use a distributed cache."""), file=sys.stderr)
if sys.stdout.isatty():
print(
"::: Do you want to clean your buck directory? [y/N]",
file=sys.stderr)
choice = raw_input().lower()
if choice == "y":
subprocess.call(
['git', 'clean', '-fd'],
stdout=sys.stderr,
cwd=self._buck_dir)
raise RestartBuck()
return buck_version.get_dirty_buck_version(self._buck_dir)
def _get_extra_java_args(self):
return [
"-Dbuck.git_commit={0}".format(self._get_git_revision()),
"-Dbuck.git_commit_timestamp={0}".format(
self._get_git_commit_timestamp()),
"-Dbuck.git_dirty={0}".format(int(self._is_dirty())),
]
def _get_bootstrap_classpath(self):
return self._join_buck_dir("build/bootstrapper/bootstrapper.jar")
def _get_java_classpath(self):
return self._pathsep.join([self._join_buck_dir(p) for p in JAVA_CLASSPATHS])
```
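`get_ant_env` only injects a heap cap when `ANT_OPTS` does not already carry one. A quick standalone demonstration (the import path is an assumption):
```python
# Quick demonstration of get_ant_env(); the import path is an assumption.
import os
from buck_repo import get_ant_env

os.environ.pop('ANT_OPTS', None)
print(get_ant_env(1000)['ANT_OPTS'])      # '-Xmx1000m' is appended

os.environ['ANT_OPTS'] = '-Xmx4g -verbose'
print(get_ant_env(1000)['ANT_OPTS'])      # existing -Xmx wins, value is left untouched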
#### File: buck/programs/gen_buck_info.py
```python
import os
import json
import sys
import buck_version
def main(argv):
# Locate the root of the buck repo. We'll need to be there to
# generate the buck version UID.
path = os.getcwd()
while not os.path.exists(os.path.join(path, '.buckconfig')):
path = os.path.dirname(path)
# Attempt to create a "clean" version, but fall back to a "dirty"
# one if need be.
version = buck_version.get_clean_buck_version(path)
timestamp = -1
if version is None:
version = buck_version.get_dirty_buck_version(path)
else:
timestamp = buck_version.get_git_revision_timestamp(path)
json.dump(
{'version': version, 'timestamp': timestamp},
sys.stdout,
sort_keys=True,
indent=2)
sys.exit(main(sys.argv))
```
#### File: cli/bootstrapper/class_loader_test.py
```python
import os
import subprocess
import sys
import unittest
class ClassLoaderTest(unittest.TestCase):
def test_should_not_pollute_classpath_when_processor_path_is_set(self):
"""
Tests that annotation processors get their own class path, isolated from Buck's.
There was a bug caused by adding annotation processors and setting the processorpath
for javac. In that case, Buck's version of guava would leak into the classpath of the
annotation processor causing it to fail to run and all heck breaking loose."""
root_directory = os.getcwd()
buck_path = os.path.join(root_directory, 'bin', 'buck')
test_data_directory = os.path.join(
root_directory,
'test',
'com',
'facebook',
'buck',
'cli',
'bootstrapper',
'testdata',
'old_guava')
# Pass thru our environment, except disabling buckd so that we can be sure the right buck
# is run.
child_environment = dict(os.environ)
child_environment["NO_BUCKD"] = "1"
proc = subprocess.Popen(
[buck_path, 'build', '//:example'],
cwd=test_data_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=child_environment)
stdout, stderr = proc.communicate()
# Copy output through to unittest's output so failures are easy to debug. Can't just
# provide sys.stdout/sys.stderr to Popen because unittest has replaced the streams with
# things that aren't directly compatible with Popen.
sys.stdout.write(stdout)
sys.stdout.flush()
sys.stderr.write(stderr)
sys.stderr.flush()
self.assertEquals(0, proc.returncode)
if __name__ == '__main__':
unittest.main()
``` |
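The test isolates the child Buck process by copying the parent environment and overriding a single variable before handing it to `Popen`. The same technique, reduced to a self-contained example:
```python
# Self-contained example of the environment-isolation technique used in the test above.
import os
import subprocess
import sys

child_environment = dict(os.environ)
child_environment['NO_BUCKD'] = '1'

proc = subprocess.Popen(
    [sys.executable, '-c', "import os; print(os.environ.get('NO_BUCKD'))"],
    stdout=subprocess.PIPE,
    env=child_environment)
stdout, _ = proc.communicate()
print(stdout.strip())   # b'1': only the child sees the override
```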
{
"source": "jimevans/infield-fly",
"score": 3
} |
#### File: jimevans/infield-fly/notification.py
```python
import logging
from twilio.rest import Client
class Notifier:
"""Notifies a user via SMS using the Twilio API"""
def __init__(self, account_sid, auth_token, sending_number):
super().__init__()
self.account_sid = account_sid
self.auth_token = auth_token
self.sending_number = sending_number
@classmethod
def create_default_notifier(cls, config):
"""Creates a notifier using the specified notification settings"""
logger = logging.getLogger()
if config.notification.sid is None or config.notification.sid == "":
logger.warning("Twilio account SID not set. Not notifying.")
return None
if config.notification.auth_token is None or config.notification.auth_token == "":
logger.warning("Twilio auth token not set. Not notifying.")
return None
if (config.notification.sending_number is None
or config.notification.sending_number == ""):
logger.warning("Twilio SMS sending number not set. Not notifying.")
return None
account_sid = config.notification.sid
auth_token = config.notification.auth_token
sender = config.notification.sending_number
return Notifier(account_sid, auth_token, sender)
def notify(self, phone_number, message_body):
"""Sends an SMS notification to the specified phone number with the specified body"""
client = Client(self.account_sid, self.auth_token)
message = client.messages.create(
body = message_body, from_=self.sending_number, to=phone_number)
return message.sid
``` |
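A hypothetical wiring of `Notifier`; the SID, token and phone numbers below are placeholders. In the project itself the instance would normally come from `Notifier.create_default_notifier(config)` with whatever settings object the surrounding code loads.
```python
# Hypothetical usage; credentials and phone numbers are placeholders.
from notification import Notifier

notifier = Notifier(account_sid='ACxxxxxxxxxxxxxxxx',
                    auth_token='your_auth_token',
                    sending_number='+15005550006')
sid = notifier.notify('+15551234567', 'Conversion finished: 3 episodes ready.')
print('queued Twilio message', sid)
```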
{
"source": "JimEverest/scratch-vm",
"score": 3
} |
#### File: pydobot/backup/dobot.py
```python
import serial
import struct
import time
import threading
import warnings
from .message import Message
from enums.PTPMode import PTPMode
from enums.CommunicationProtocolIDs import CommunicationProtocolIDs
from enums.ControlValues import ControlValues
class Dobot:
def __init__(self, port, verbose=False):
threading.Thread.__init__(self)
self._on = True
self.verbose = verbose
self.lock = threading.Lock()
self.ser = serial.Serial(port,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS)
is_open = self.ser.isOpen()
if self.verbose:
print('pydobot: %s open' % self.ser.name if is_open else 'failed to open serial port')
self._set_queued_cmd_start_exec()
self._set_queued_cmd_clear()
self._set_ptp_joint_params(200, 200, 200, 200, 200, 200, 200, 200)
self._set_ptp_coordinate_params(velocity=200, acceleration=200)
self._set_ptp_jump_params(10, 200)
self._set_ptp_common_params(velocity=100, acceleration=100)
self._get_pose()
"""
Gets the current command index
"""
def _get_queued_cmd_current_index(self):
msg = Message()
msg.id = CommunicationProtocolIDs.GET_QUEUED_CMD_CURRENT_INDEX
response = self._send_command(msg)
idx = struct.unpack_from('L', response.params, 0)[0]
return idx
"""
Gets the real-time pose of the Dobot
"""
def _get_pose(self):
msg = Message()
msg.id = CommunicationProtocolIDs.GET_POSE
response = self._send_command(msg)
self.x = struct.unpack_from('f', response.params, 0)[0]
self.y = struct.unpack_from('f', response.params, 4)[0]
self.z = struct.unpack_from('f', response.params, 8)[0]
self.r = struct.unpack_from('f', response.params, 12)[0]
self.j1 = struct.unpack_from('f', response.params, 16)[0]
self.j2 = struct.unpack_from('f', response.params, 20)[0]
self.j3 = struct.unpack_from('f', response.params, 24)[0]
self.j4 = struct.unpack_from('f', response.params, 28)[0]
if self.verbose:
print("pydobot: x:%03.1f \
y:%03.1f \
z:%03.1f \
r:%03.1f \
j1:%03.1f \
j2:%03.1f \
j3:%03.1f \
j4:%03.1f" %
(self.x, self.y, self.z, self.r, self.j1, self.j2, self.j3, self.j4))
return response
def _read_message(self):
time.sleep(0.1)
b = self.ser.read_all()
if len(b) > 0:
msg = Message(b)
if self.verbose:
print('pydobot: <<', msg)
return msg
return
def _send_command(self, msg, wait=False):
self.lock.acquire()
self._send_message(msg)
response = self._read_message()
self.lock.release()
if not wait:
return response
expected_idx = struct.unpack_from('L', response.params, 0)[0]
if self.verbose:
print('pydobot: waiting for command', expected_idx)
while True:
current_idx = self._get_queued_cmd_current_index()
if current_idx != expected_idx:
time.sleep(0.1)
continue
if self.verbose:
print('pydobot: command %d executed' % current_idx)
break
return response
def _send_message(self, msg):
time.sleep(0.1)
if self.verbose:
print('pydobot: >>', msg)
self.ser.write(msg.bytes())
"""
Executes the CP Command
"""
def _set_cp_cmd(self, x, y, z):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_CP_CMD
msg.ctrl = ControlValues.THREE
msg.params = bytearray(bytes([0x01]))
msg.params.extend(bytearray(struct.pack('f', x)))
msg.params.extend(bytearray(struct.pack('f', y)))
msg.params.extend(bytearray(struct.pack('f', z)))
msg.params.append(0x00)
return self._send_command(msg)
"""
Sets the status of the gripper
"""
def _set_end_effector_gripper(self, enable=False):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_END_EFFECTOR_GRIPPER
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray([0x01]))
if enable is True:
msg.params.extend(bytearray([0x01]))
else:
msg.params.extend(bytearray([0x00]))
return self._send_command(msg)
"""
Sets the status of the suction cup
"""
def _set_end_effector_suction_cup(self, enable=False):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_END_EFFECTOR_SUCTION_CUP
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray([0x01]))
if enable is True:
msg.params.extend(bytearray([0x01]))
else:
msg.params.extend(bytearray([0x00]))
return self._send_command(msg)
"""
Sets the velocity ratio and the acceleration ratio in PTP mode
"""
def _set_ptp_joint_params(self, v_x, v_y, v_z, v_r, a_x, a_y, a_z, a_r):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_JOINT_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', v_x)))
msg.params.extend(bytearray(struct.pack('f', v_y)))
msg.params.extend(bytearray(struct.pack('f', v_z)))
msg.params.extend(bytearray(struct.pack('f', v_r)))
msg.params.extend(bytearray(struct.pack('f', a_x)))
msg.params.extend(bytearray(struct.pack('f', a_y)))
msg.params.extend(bytearray(struct.pack('f', a_z)))
msg.params.extend(bytearray(struct.pack('f', a_r)))
return self._send_command(msg)
"""
Sets the velocity and acceleration of the Cartesian coordinate axes in PTP mode
"""
def _set_ptp_coordinate_params(self, velocity, acceleration):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_COORDINATE_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', velocity)))
msg.params.extend(bytearray(struct.pack('f', velocity)))
msg.params.extend(bytearray(struct.pack('f', acceleration)))
msg.params.extend(bytearray(struct.pack('f', acceleration)))
return self._send_command(msg)
"""
Sets the lifting height and the maximum lifting height in JUMP mode
"""
def _set_ptp_jump_params(self, jump, limit):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_JUMP_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', jump)))
msg.params.extend(bytearray(struct.pack('f', limit)))
return self._send_command(msg)
"""
Sets the velocity ratio, acceleration ratio in PTP mode
"""
def _set_ptp_common_params(self, velocity, acceleration):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_COMMON_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', velocity)))
msg.params.extend(bytearray(struct.pack('f', acceleration)))
return self._send_command(msg)
"""
Executes PTP command
"""
def _set_ptp_cmd(self, x, y, z, r, mode, wait):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_PTP_CMD
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray([mode]))
msg.params.extend(bytearray(struct.pack('f', x)))
msg.params.extend(bytearray(struct.pack('f', y)))
msg.params.extend(bytearray(struct.pack('f', z)))
msg.params.extend(bytearray(struct.pack('f', r)))
return self._send_command(msg, wait)
"""
Clears command queue
"""
def _set_queued_cmd_clear(self):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_QUEUED_CMD_CLEAR
msg.ctrl = ControlValues.ONE
return self._send_command(msg)
"""
Start command
"""
def _set_queued_cmd_start_exec(self):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_QUEUED_CMD_START_EXEC
msg.ctrl = ControlValues.ONE
return self._send_command(msg)
"""
Stop command
"""
def _set_queued_cmd_stop_exec(self):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_QUEUED_CMD_STOP_EXEC
msg.ctrl = ControlValues.ONE
return self._send_command(msg)
def close(self):
self._on = False
self.lock.acquire()
self.ser.close()
if self.verbose:
print('pydobot: %s closed' % self.ser.name)
self.lock.release()
def go(self, x, y, z, r=0.):
warnings.warn('go() is deprecated, use move_to() instead')
self.move_to(x, y, z, r)
def move_to(self, x, y, z, r, wait=False):
self._set_ptp_cmd(x, y, z, r, mode=PTPMode.MOVL_XYZ, wait=wait)
def suck(self, enable):
self._set_end_effector_suction_cup(enable)
def grip(self, enable):
self._set_end_effector_gripper(enable)
def speed(self, velocity=100., acceleration=100.):
self._set_ptp_common_params(velocity, acceleration)
self._set_ptp_coordinate_params(velocity, acceleration)
def pose(self):
response = self._get_pose()
x = struct.unpack_from('f', response.params, 0)[0]
y = struct.unpack_from('f', response.params, 4)[0]
z = struct.unpack_from('f', response.params, 8)[0]
r = struct.unpack_from('f', response.params, 12)[0]
j1 = struct.unpack_from('f', response.params, 16)[0]
j2 = struct.unpack_from('f', response.params, 20)[0]
j3 = struct.unpack_from('f', response.params, 24)[0]
j4 = struct.unpack_from('f', response.params, 28)[0]
return x, y, z, r, j1, j2, j3, j4
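if __name__ == '__main__':
    # Minimal usage sketch -- not part of the original module. The serial port
    # name below is an assumption; replace it with the port your Dobot is
    # attached to (for example 'COM3' on Windows).
    device = Dobot(port='/dev/ttyUSB0', verbose=True)
    x, y, z, r, j1, j2, j3, j4 = device.pose()
    device.move_to(x + 20.0, y, z, r, wait=True)  # queue a small move and block until it executes
    device.move_to(x, y, z, r, wait=True)         # return to the starting pose
    device.close()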
``` |
{
"source": "Jimexist/advent-of-code-python",
"score": 4
} |
#### File: advent-of-code-python/days/day1.py
```python
import sys
def first_half(fin):
nums = [int(i) for i in fin]
return sum(nums)
def second_half(fin):
nums = [int(i) for i in fin]
seen = set()
x = 0
while True:
for i in nums:
x += i
if x in seen:
return x
seen.add(x)
if __name__ == "__main__":
print(second_half(sys.stdin))
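# Worked example (illustrative, not taken from this repo): with the frequency
# changes +1, -2, +3, +1 the cumulative sums are 1, -1, 2, 3, then on the second
# pass 4 and 2 -- so second_half returns 2, the first sum reached twice.
# Either half reads puzzle input from stdin, e.g.:  python day1.py < input.txt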
``` |
{
"source": "Jimexist/chinese-mnist-with-pytorch-lightning-resnet",
"score": 3
} |
#### File: Jimexist/chinese-mnist-with-pytorch-lightning-resnet/model.py
```python
import os
from pathlib import Path
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.metrics import Accuracy
from sklearn.model_selection import StratifiedKFold
from torch import nn, optim
from torchvision.models import resnet18
try:
    from dataset import ChineseMNISTDataModule, ChineseMNISTDataset
except ImportError:  # the local dataset module may not be importable in every environment
    pass
class ChineseMNISTResnetModel(pl.LightningModule):
def __init__(self, learning_rate=1e-3):
super().__init__()
self.learning_rate = learning_rate
self.num_classes = 15
resnet = resnet18(pretrained=True, progress=True)
resnet.conv1 = nn.Conv2d(
in_channels=1,
out_channels=resnet.conv1.out_channels,
kernel_size=resnet.conv1.kernel_size,
stride=resnet.conv1.stride,
dilation=resnet.conv1.dilation,
bias=resnet.conv1.bias,
)
resnet.fc = nn.Linear(512, self.num_classes)
self.resnet = resnet
self.accuracy = Accuracy(num_classes=self.num_classes)
self.criterion = nn.CrossEntropyLoss()
def forward(self, image):
image = image.permute(0, 3, 1, 2).contiguous().float()
return self.resnet(image)
def training_step(self, batch, batch_idx: int):
image, y = batch
yhat = self(image)
loss = self.criterion(yhat, y)
acc = self.accuracy(yhat, y)
return {"loss": loss, "acc": acc}
def validation_step(self, batch, batch_idx: int):
image, y = batch
yhat = self(image)
loss = self.criterion(yhat, y)
acc = self.accuracy(yhat, y)
return {"val_loss": loss, "val_acc": acc, "progress_bar": {"val_acc": acc}}
def test_step(self, batch, batch_idx):
metrics = self.validation_step(batch, batch_idx)
return {"test_acc": metrics["val_acc"], "test_loss": metrics["val_loss"]}
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
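# Shape sanity check (illustrative sketch; the dummy batch below is an assumption,
# and running it would additionally require `import torch`):
#   model = ChineseMNISTResnetModel()
#   dummy = torch.zeros(4, 64, 64, 1)   # (batch, height, width, channels); forward() permutes to NCHW
#   logits = model(dummy)               # -> shape (4, 15), one logit per character class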
def training(k_folds: int = 5):
is_kaggle = os.path.isdir("/kaggle")
data_root = Path("/kaggle/input/chinese-mnist" if is_kaggle else "archive")
all_df = pd.read_csv(data_root / "chinese_mnist.csv")
skf = StratifiedKFold(n_splits=k_folds, shuffle=True)
checkpoint_callback = ModelCheckpoint(
filepath=os.getcwd(),
save_top_k=1,
verbose=True,
monitor="val_loss",
mode="min",
)
trainer = pl.Trainer(
gpus=1,
max_epochs=4,
precision=16,
val_check_interval=0.2,
checkpoint_callback=checkpoint_callback,
)
for train_indices, val_indices in skf.split(all_df, all_df.code):
data_module = ChineseMNISTDataModule(
data_root=data_root,
all_df=all_df,
train_indices=train_indices,
val_indices=val_indices,
)
model = ChineseMNISTResnetModel()
trainer.fit(model, data_module)
if __name__ == "__main__":
training()
``` |
{
"source": "Jimexist/thrift",
"score": 2
} |
#### File: nodejs/examples/httpServer.py
```python
import sys
sys.path.append('gen-py')
from hello import HelloSvc
from thrift.protocol import TJSONProtocol
from thrift.server import THttpServer
class HelloSvcHandler:
def hello_func(self):
print("Hello Called")
return "hello from Python"
processor = HelloSvc.Processor(HelloSvcHandler())
protoFactory = TJSONProtocol.TJSONProtocolFactory()
port = 9090
server = THttpServer.THttpServer(processor, ("localhost", port), protoFactory)
print "Python server running on port " + str(port)
server.serve()
```
#### File: src/transport/THeaderTransport.py
```python
import struct
import zlib
from thrift.compat import BufferIO, byte_index
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.protocol.TCompactProtocol import TCompactProtocol, readVarint, writeVarint
from thrift.Thrift import TApplicationException
from thrift.transport.TTransport import (
CReadableTransport,
TMemoryBuffer,
TTransportBase,
TTransportException,
)
U16 = struct.Struct("!H")
I32 = struct.Struct("!i")
HEADER_MAGIC = 0x0FFF
HARD_MAX_FRAME_SIZE = 0x3FFFFFFF
class THeaderClientType(object):
HEADERS = 0x00
FRAMED_BINARY = 0x01
UNFRAMED_BINARY = 0x02
FRAMED_COMPACT = 0x03
UNFRAMED_COMPACT = 0x04
class THeaderSubprotocolID(object):
BINARY = 0x00
COMPACT = 0x02
class TInfoHeaderType(object):
KEY_VALUE = 0x01
class THeaderTransformID(object):
ZLIB = 0x01
READ_TRANSFORMS_BY_ID = {
THeaderTransformID.ZLIB: zlib.decompress,
}
WRITE_TRANSFORMS_BY_ID = {
THeaderTransformID.ZLIB: zlib.compress,
}
def _readString(trans):
size = readVarint(trans)
if size < 0:
raise TTransportException(
TTransportException.NEGATIVE_SIZE,
"Negative length"
)
return trans.read(size)
def _writeString(trans, value):
writeVarint(trans, len(value))
trans.write(value)
class THeaderTransport(TTransportBase, CReadableTransport):
def __init__(self, transport, allowed_client_types, default_protocol=THeaderSubprotocolID.BINARY):
self._transport = transport
self._client_type = THeaderClientType.HEADERS
self._allowed_client_types = allowed_client_types
self._read_buffer = BufferIO(b"")
self._read_headers = {}
self._write_buffer = BufferIO()
self._write_headers = {}
self._write_transforms = []
self.flags = 0
self.sequence_id = 0
self._protocol_id = default_protocol
self._max_frame_size = HARD_MAX_FRAME_SIZE
def isOpen(self):
return self._transport.isOpen()
def open(self):
return self._transport.open()
def close(self):
return self._transport.close()
def get_headers(self):
return self._read_headers
def set_header(self, key, value):
if not isinstance(key, bytes):
raise ValueError("header names must be bytes")
if not isinstance(value, bytes):
raise ValueError("header values must be bytes")
self._write_headers[key] = value
def clear_headers(self):
self._write_headers.clear()
def add_transform(self, transform_id):
if transform_id not in WRITE_TRANSFORMS_BY_ID:
raise ValueError("unknown transform")
self._write_transforms.append(transform_id)
def set_max_frame_size(self, size):
if not 0 < size < HARD_MAX_FRAME_SIZE:
raise ValueError("maximum frame size should be < %d and > 0" % HARD_MAX_FRAME_SIZE)
self._max_frame_size = size
@property
def protocol_id(self):
if self._client_type == THeaderClientType.HEADERS:
return self._protocol_id
elif self._client_type in (THeaderClientType.FRAMED_BINARY, THeaderClientType.UNFRAMED_BINARY):
return THeaderSubprotocolID.BINARY
elif self._client_type in (THeaderClientType.FRAMED_COMPACT, THeaderClientType.UNFRAMED_COMPACT):
return THeaderSubprotocolID.COMPACT
else:
raise TTransportException(
TTransportException.INVALID_CLIENT_TYPE,
"Protocol ID not know for client type %d" % self._client_type,
)
def read(self, sz):
# if there are bytes left in the buffer, produce those first.
bytes_read = self._read_buffer.read(sz)
bytes_left_to_read = sz - len(bytes_read)
if bytes_left_to_read == 0:
return bytes_read
# if we've determined this is an unframed client, just pass the read
# through to the underlying transport until we're reset again at the
# beginning of the next message.
if self._client_type in (THeaderClientType.UNFRAMED_BINARY, THeaderClientType.UNFRAMED_COMPACT):
return bytes_read + self._transport.read(bytes_left_to_read)
# we're empty and (maybe) framed. fill the buffers with the next frame.
self.readFrame(bytes_left_to_read)
return bytes_read + self._read_buffer.read(bytes_left_to_read)
def _set_client_type(self, client_type):
if client_type not in self._allowed_client_types:
raise TTransportException(
TTransportException.INVALID_CLIENT_TYPE,
"Client type %d not allowed by server." % client_type,
)
self._client_type = client_type
def readFrame(self, req_sz):
# the first word could either be the length field of a framed message
# or the first bytes of an unframed message.
first_word = self._transport.readAll(I32.size)
frame_size, = I32.unpack(first_word)
is_unframed = False
if frame_size & TBinaryProtocol.VERSION_MASK == TBinaryProtocol.VERSION_1:
self._set_client_type(THeaderClientType.UNFRAMED_BINARY)
is_unframed = True
elif (byte_index(first_word, 0) == TCompactProtocol.PROTOCOL_ID and
byte_index(first_word, 1) & TCompactProtocol.VERSION_MASK == TCompactProtocol.VERSION):
self._set_client_type(THeaderClientType.UNFRAMED_COMPACT)
is_unframed = True
if is_unframed:
bytes_left_to_read = req_sz - I32.size
if bytes_left_to_read > 0:
rest = self._transport.read(bytes_left_to_read)
else:
rest = b""
self._read_buffer = BufferIO(first_word + rest)
return
# ok, we're still here so we're framed.
if frame_size > self._max_frame_size:
raise TTransportException(
TTransportException.SIZE_LIMIT,
"Frame was too large.",
)
read_buffer = BufferIO(self._transport.readAll(frame_size))
# the next word is either going to be the version field of a
# binary/compact protocol message or the magic value + flags of a
# header protocol message.
second_word = read_buffer.read(I32.size)
version, = I32.unpack(second_word)
read_buffer.seek(0)
if version >> 16 == HEADER_MAGIC:
self._set_client_type(THeaderClientType.HEADERS)
self._read_buffer = self._parse_header_format(read_buffer)
elif version & TBinaryProtocol.VERSION_MASK == TBinaryProtocol.VERSION_1:
self._set_client_type(THeaderClientType.FRAMED_BINARY)
self._read_buffer = read_buffer
elif (byte_index(second_word, 0) == TCompactProtocol.PROTOCOL_ID and
byte_index(second_word, 1) & TCompactProtocol.VERSION_MASK == TCompactProtocol.VERSION):
self._set_client_type(THeaderClientType.FRAMED_COMPACT)
self._read_buffer = read_buffer
else:
raise TTransportException(
TTransportException.INVALID_CLIENT_TYPE,
"Could not detect client transport type.",
)
def _parse_header_format(self, buffer):
# make BufferIO look like TTransport for varint helpers
buffer_transport = TMemoryBuffer()
buffer_transport._buffer = buffer
buffer.read(2) # discard the magic bytes
self.flags, = U16.unpack(buffer.read(U16.size))
self.sequence_id, = I32.unpack(buffer.read(I32.size))
header_length = U16.unpack(buffer.read(U16.size))[0] * 4
end_of_headers = buffer.tell() + header_length
if end_of_headers > len(buffer.getvalue()):
raise TTransportException(
TTransportException.SIZE_LIMIT,
"Header size is larger than whole frame.",
)
self._protocol_id = readVarint(buffer_transport)
transforms = []
transform_count = readVarint(buffer_transport)
for _ in range(transform_count):
transform_id = readVarint(buffer_transport)
if transform_id not in READ_TRANSFORMS_BY_ID:
raise TApplicationException(
TApplicationException.INVALID_TRANSFORM,
"Unknown transform: %d" % transform_id,
)
transforms.append(transform_id)
transforms.reverse()
headers = {}
while buffer.tell() < end_of_headers:
header_type = readVarint(buffer_transport)
if header_type == TInfoHeaderType.KEY_VALUE:
count = readVarint(buffer_transport)
for _ in range(count):
key = _readString(buffer_transport)
value = _readString(buffer_transport)
headers[key] = value
else:
break # ignore unknown headers
self._read_headers = headers
# skip padding / anything we didn't understand
buffer.seek(end_of_headers)
payload = buffer.read()
for transform_id in transforms:
transform_fn = READ_TRANSFORMS_BY_ID[transform_id]
payload = transform_fn(payload)
return BufferIO(payload)
def write(self, buf):
self._write_buffer.write(buf)
def flush(self):
payload = self._write_buffer.getvalue()
self._write_buffer = BufferIO()
buffer = BufferIO()
if self._client_type == THeaderClientType.HEADERS:
for transform_id in self._write_transforms:
transform_fn = WRITE_TRANSFORMS_BY_ID[transform_id]
payload = transform_fn(payload)
headers = BufferIO()
writeVarint(headers, self._protocol_id)
writeVarint(headers, len(self._write_transforms))
for transform_id in self._write_transforms:
writeVarint(headers, transform_id)
if self._write_headers:
writeVarint(headers, TInfoHeaderType.KEY_VALUE)
writeVarint(headers, len(self._write_headers))
for key, value in self._write_headers.items():
_writeString(headers, key)
_writeString(headers, value)
self._write_headers = {}
padding_needed = (4 - (len(headers.getvalue()) % 4)) % 4
headers.write(b"\x00" * padding_needed)
header_bytes = headers.getvalue()
buffer.write(I32.pack(10 + len(header_bytes) + len(payload)))
buffer.write(U16.pack(HEADER_MAGIC))
buffer.write(U16.pack(self.flags))
buffer.write(I32.pack(self.sequence_id))
buffer.write(U16.pack(len(header_bytes) // 4))
buffer.write(header_bytes)
buffer.write(payload)
elif self._client_type in (THeaderClientType.FRAMED_BINARY, THeaderClientType.FRAMED_COMPACT):
buffer.write(I32.pack(len(payload)))
buffer.write(payload)
elif self._client_type in (THeaderClientType.UNFRAMED_BINARY, THeaderClientType.UNFRAMED_COMPACT):
buffer.write(payload)
else:
raise TTransportException(
TTransportException.INVALID_CLIENT_TYPE,
"Unknown client type.",
)
# the frame length field doesn't count towards the frame payload size
frame_bytes = buffer.getvalue()
frame_payload_size = len(frame_bytes) - 4
if frame_payload_size > self._max_frame_size:
raise TTransportException(
TTransportException.SIZE_LIMIT,
"Attempting to send frame that is too large.",
)
self._transport.write(frame_bytes)
self._transport.flush()
@property
def cstringio_buf(self):
return self._read_buffer
def cstringio_refill(self, partialread, reqlen):
result = bytearray(partialread)
while len(result) < reqlen:
result += self.read(reqlen - len(result))
self._read_buffer = BufferIO(result)
return self._read_buffer
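    # Round-trip sketch (illustrative only; the header key/value and payload below
    # are assumptions, not part of the Thrift test suite):
    #   out = THeaderTransport(TMemoryBuffer(), [THeaderClientType.HEADERS])
    #   out.set_header(b"trace_id", b"abc123")
    #   out.write(b"payload bytes")
    #   out.flush()                       # frames headers + payload into the memory buffer
    #   frame = out._transport.getvalue()
    #   back = THeaderTransport(TMemoryBuffer(frame), [THeaderClientType.HEADERS])
    #   assert back.read(13) == b"payload bytes"
    #   assert back.get_headers()[b"trace_id"] == b"abc123"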
```
#### File: py/test/thrift_transport.py
```python
import unittest
import os
import _import_local_thrift # noqa
from thrift.transport import TTransport
class TestTFileObjectTransport(unittest.TestCase):
def test_TFileObjectTransport(self):
test_dir = os.path.dirname(os.path.abspath(__file__))
datatxt_path = os.path.join(test_dir, 'data.txt')
buffer = '{"soft":"thrift","version":0.13,"1":true}'
with open(datatxt_path, "w+") as f:
buf = TTransport.TFileObjectTransport(f)
buf.write(buffer)
buf.flush()
buf.close()
with open(datatxt_path, "rb") as f:
buf = TTransport.TFileObjectTransport(f)
value = buf.read(len(buffer)).decode('utf-8')
self.assertEqual(buffer, value)
buf.close()
os.remove(datatxt_path)
class TestMemoryBuffer(unittest.TestCase):
def test_memorybuffer_write(self):
data = '{"1":[1,"hello"],"a":{"A":"abc"},"bool":true,"num":12345}'
buffer_w = TTransport.TMemoryBuffer()
buffer_w.write(data.encode('utf-8'))
value = buffer_w.getvalue()
self.assertEqual(value.decode('utf-8'), data)
buffer_w.close()
def test_memorybuffer_read(self):
data = '{"1":[1, "hello"],"a":{"A":"abc"},"bool":true,"num":12345}'
buffer_r = TTransport.TMemoryBuffer(data.encode('utf-8'))
value_r = buffer_r.read(len(data))
value = buffer_r.getvalue()
self.assertEqual(value.decode('utf-8'), data)
self.assertEqual(value_r.decode('utf-8'), data)
buffer_r.close()
if __name__ == '__main__':
unittest.main()
```
#### File: py/test/thrift_TZlibTransport.py
```python
import unittest
import random
import string
import _import_local_thrift # noqa
from thrift.transport import TTransport
from thrift.transport import TZlibTransport
def generate_random_buff():
data = []
buf_len = 1024 * 32
index = 0
while index < buf_len:
run_len = random.randint(1, 64)
if index + run_len > buf_len:
run_len = buf_len - index
for i in range(run_len):
data.extend(random.sample(string.printable, 1))
index += 1
new_data = ''.join(data)
return new_data
class TestTZlibTransport(unittest.TestCase):
def test_write_then_read(self):
buff = TTransport.TMemoryBuffer()
trans = TTransport.TBufferedTransportFactory().getTransport(buff)
zlib_trans = TZlibTransport.TZlibTransport(trans)
data_w = generate_random_buff()
zlib_trans.write(data_w.encode('utf-8'))
zlib_trans.flush()
value = buff.getvalue()
zlib_trans.close()
buff = TTransport.TMemoryBuffer(value)
trans = TTransport.TBufferedTransportFactory().getTransport(buff)
zlib_trans = TZlibTransport.TZlibTransport(trans)
data_r = zlib_trans.read(len(data_w))
zlib_trans.close()
try:
self.assertEqual(data_w, data_r.decode('utf-8'))
self.assertEqual(len(data_w), len(data_r.decode('utf-8')))
except AssertionError:
raise
def test_after_flushd_write_then_read(self):
buff = TTransport.TMemoryBuffer()
trans = TTransport.TBufferedTransportFactory().getTransport(buff)
zlib_trans = TZlibTransport.TZlibTransport(trans)
data_w_1 = "hello thrift !@#" * 50
zlib_trans.write(data_w_1.encode('utf-8'))
zlib_trans.flush()
data_w_2 = "{'name': 'thrift', 1: ['abcd' , 233, ('a','c')]}" * 20
zlib_trans.write(data_w_2.encode('utf-8'))
zlib_trans.flush()
value = buff.getvalue()
zlib_trans.close()
buff = TTransport.TMemoryBuffer(value)
trans = TTransport.TBufferedTransportFactory().getTransport(buff)
zlib_trans = TZlibTransport.TZlibTransport(trans)
data_r = zlib_trans.read(len(data_w_1) + len(data_w_2))
zlib_trans.close()
try:
self.assertEqual(data_w_1 + data_w_2, data_r.decode('utf-8'))
self.assertEqual(len(data_w_1) + len(data_w_2), len(data_r.decode('utf-8')))
except AssertionError:
raise
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jimfenton/datatracker",
"score": 3
} |
#### File: ietf/doc/tests_js.py
```python
import debug # pyflakes:ignore
from ietf.doc.factories import WgDraftFactory, DocumentAuthorFactory
from ietf.person.factories import PersonFactory
from ietf.person.models import Person
from ietf.utils.jstest import ( IetfSeleniumTestCase, ifSeleniumEnabled, selenium_enabled,
presence_of_element_child_by_css_selector )
if selenium_enabled():
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
@ifSeleniumEnabled
class EditAuthorsTests(IetfSeleniumTestCase):
def setUp(self):
super(EditAuthorsTests, self).setUp()
self.wait = WebDriverWait(self.driver, 2)
def test_add_author_forms(self):
def _fill_in_author_form(form_elt, name, email, affiliation, country):
"""Fill in an author form on the edit authors page
The form_elt input should be an element containing all the relevant inputs.
"""
# To enter the person, type their name in the select2 search box, wait for the
# search to offer the result, then press 'enter' to accept the result and close
# the search input.
person_span = form_elt.find_element(By.CLASS_NAME, 'select2-selection')
self.scroll_to_element(person_span)
person_span.click()
input = self.driver.find_element(By.CSS_SELECTOR, '.select2-search__field[aria-controls*=author]')
input.send_keys(name)
result_selector = 'ul.select2-results__options[id*=author] > li.select2-results__option--selectable'
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, result_selector),
name
))
input.send_keys('\n') # select the object
# After the author is selected, the email select options will be populated.
# Wait for that, then click on the option corresponding to the requested email.
# This will only work if the email matches an address for the selected person.
email_select = form_elt.find_element(By.CSS_SELECTOR, 'select[name$="email"]')
email_option = self.wait.until(
presence_of_element_child_by_css_selector(email_select, 'option[value="{}"]'.format(email))
)
email_option.click() # select the email
# Fill in the affiliation and country. Finally, simple text inputs!
affil_input = form_elt.find_element(By.CSS_SELECTOR, 'input[name$="affiliation"]')
affil_input.send_keys(affiliation)
country_input = form_elt.find_element(By.CSS_SELECTOR, 'input[name$="country"]')
country_input.send_keys(country)
def _read_author_form(form_elt):
"""Read values from an author form
Note: returns the Person instance named in the person field, not just their name.
"""
hidden_person_input = form_elt.find_element(By.CSS_SELECTOR, 'select[name$="person"]')
email_select = form_elt.find_element(By.CSS_SELECTOR, 'select[name$="email"]')
affil_input = form_elt.find_element(By.CSS_SELECTOR, 'input[name$="affiliation"]')
country_input = form_elt.find_element(By.CSS_SELECTOR, 'input[name$="country"]')
return (
Person.objects.get(pk=hidden_person_input.get_attribute('value')),
email_select.get_attribute('value'),
affil_input.get_attribute('value'),
country_input.get_attribute('value'),
)
# Create testing resources
draft = WgDraftFactory()
DocumentAuthorFactory(document=draft)
authors = PersonFactory.create_batch(2) # authors we will add
orgs = ['some org', 'some other org'] # affiliations for the authors
countries = ['France', 'Uganda'] # countries for the authors
url = self.absreverse('ietf.doc.views_doc.edit_authors', kwargs=dict(name=draft.name))
        # Start the test by logging in with appropriate permissions and retrieving the edit page
self.login('secretary')
self.driver.get(url)
# The draft has one author to start with. Find the list and check the count.
authors_list = self.driver.find_element(By.ID, 'authors-list')
author_forms = authors_list.find_elements(By.CLASS_NAME, 'author-panel')
self.assertEqual(len(author_forms), 1)
# get the "add author" button so we can add blank author forms
add_author_button = self.driver.find_element(By.ID, 'add-author-button')
for index, auth in enumerate(authors):
self.driver.execute_script("arguments[0].scrollIntoView();", add_author_button) # FIXME: no idea why this fails:
# self.scroll_to_element(add_author_button) # Can only click if it's in view!
self.driver.execute_script("arguments[0].click();", add_author_button) # FIXME: no idea why this fails:
# add_author_button.click() # Create a new form. Automatically scrolls to it.
author_forms = authors_list.find_elements(By.CLASS_NAME, 'author-panel')
authors_added = index + 1
self.assertEqual(len(author_forms), authors_added + 1) # Started with 1 author, hence +1
_fill_in_author_form(author_forms[index + 1], auth.name, str(auth.email()), orgs[index], countries[index])
# Check that the author forms have correct (and distinct) values
first_auth = draft.documentauthor_set.first()
self.assertEqual(
_read_author_form(author_forms[0]),
(first_auth.person, str(first_auth.email), first_auth.affiliation, first_auth.country),
)
for index, auth in enumerate(authors):
self.assertEqual(
_read_author_form(author_forms[index + 1]),
(auth, str(auth.email()), orgs[index], countries[index]),
)
# Must provide a "basis" (change reason)
self.driver.find_element(By.ID, 'id_basis').send_keys('change <PASSWORD>')
# Now click the 'submit' button and check that the update was accepted.
submit_button = self.driver.find_element(By.CSS_SELECTOR, 'button[type="submit"]')
self.driver.execute_script("arguments[0].click();", submit_button) # FIXME: no idea why this fails:
# self.scroll_to_element(submit_button)
# submit_button.click()
# Wait for redirect to the document_main view
self.wait.until(
expected_conditions.url_to_be(
self.absreverse('ietf.doc.views_doc.document_main', kwargs=dict(name=draft.name))
))
# Just a basic check that the expected authors show up. Details of the updates
# are tested separately.
self.assertEqual(
list(draft.documentauthor_set.values_list('person', flat=True)),
[first_auth.person.pk] + [auth.pk for auth in authors]
)
```
#### File: ietf/doc/views_bofreq.py
```python
import debug # pyflakes:ignore
import io
from django import forms
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse as urlreverse
from django.utils.html import escape
from ietf.doc.mails import (email_bofreq_title_changed, email_bofreq_editors_changed,
email_bofreq_new_revision, email_bofreq_responsible_changed)
from ietf.doc.models import (Document, DocAlias, DocEvent, NewRevisionDocEvent,
BofreqEditorDocEvent, BofreqResponsibleDocEvent, State)
from ietf.doc.utils import add_state_change_event
from ietf.doc.utils_bofreq import bofreq_editors, bofreq_responsible
from ietf.ietfauth.utils import has_role, role_required
from ietf.person.fields import SearchablePersonsField
from ietf.utils import markdown
from ietf.utils.response import permission_denied
from ietf.utils.text import xslugify
from ietf.utils.textupload import get_cleaned_text_file_content
def bof_requests(request):
reqs = Document.objects.filter(type_id='bofreq')
for req in reqs:
req.latest_revision_event = req.latest_event(NewRevisionDocEvent)
req.responsible = bofreq_responsible(req)
req.editors = bofreq_editors(req)
sorted_reqs = sorted(sorted(reqs, key=lambda doc: doc.latest_revision_event.time, reverse=True), key=lambda doc: doc.get_state().order)
return render(request, 'doc/bofreq/bof_requests.html',dict(reqs=sorted_reqs))
class BofreqUploadForm(forms.Form):
ACTIONS = [
("enter", "Enter content directly"),
("upload", "Upload content from file"),
]
bofreq_submission = forms.ChoiceField(choices=ACTIONS, widget=forms.RadioSelect)
bofreq_file = forms.FileField(label="Markdown source file to upload", required=False)
bofreq_content = forms.CharField(widget=forms.Textarea(attrs={'rows':30}), required=False, strip=False)
def clean(self):
def require_field(f):
if not self.cleaned_data.get(f):
self.add_error(f, forms.ValidationError("You must fill in this field."))
return False
else:
return True
submission_method = self.cleaned_data.get("bofreq_submission")
content = ''
if submission_method == "enter":
if require_field("bofreq_content"):
content = self.cleaned_data["bofreq_content"].replace("\r", "")
default_content = render_to_string('doc/bofreq/bofreq_template.md',{})
if content==default_content:
raise forms.ValidationError('The example content may not be saved. Edit it as instructed to document this BOF request.')
elif submission_method == "upload":
if require_field("bofreq_file"):
content = get_cleaned_text_file_content(self.cleaned_data["bofreq_file"])
try:
_ = markdown.markdown(content)
except Exception as e:
raise forms.ValidationError(f'Markdown processing failed: {e}')
@login_required
def submit(request, name):
bofreq = get_object_or_404(Document, type="bofreq", name=name)
previous_editors = bofreq_editors(bofreq)
state_id = bofreq.get_state_slug('bofreq')
if not (has_role(request.user,('Secretariat', 'Area Director', 'IAB')) or (state_id=='proposed' and request.user.person in previous_editors)):
permission_denied(request,"You do not have permission to upload a new revision of this BOF Request")
if request.method == 'POST':
form = BofreqUploadForm(request.POST, request.FILES)
if form.is_valid():
bofreq.rev = "%02d" % (int(bofreq.rev)+1)
e = NewRevisionDocEvent.objects.create(
type="new_revision",
doc=bofreq,
by=request.user.person,
rev=bofreq.rev,
desc='New revision available',
time=bofreq.time,
)
bofreq.save_with_history([e])
bofreq_submission = form.cleaned_data['bofreq_submission']
if bofreq_submission == "upload":
content = get_cleaned_text_file_content(form.cleaned_data["bofreq_file"])
else:
content = form.cleaned_data['bofreq_content']
with io.open(bofreq.get_file_name(), 'w', encoding='utf-8') as destination:
destination.write(content)
email_bofreq_new_revision(request, bofreq)
return redirect('ietf.doc.views_doc.document_main', name=bofreq.name)
else:
init = {'bofreq_content':bofreq.text_or_error(),
'bofreq_submission':'enter',
}
form = BofreqUploadForm(initial=init)
return render(request, 'doc/bofreq/upload_content.html',
{'form':form,'doc':bofreq})
class NewBofreqForm(BofreqUploadForm):
title = forms.CharField(max_length=255)
field_order = ['title','bofreq_submission','bofreq_file','bofreq_content']
def __init__(self, requester, *args, **kwargs):
self._requester = requester
super().__init__(*args, **kwargs)
def name_from_title(self, title):
requester_slug = xslugify(self._requester.last_name())
title_slug = xslugify(title)
name = f'bofreq-{requester_slug[:64]}-{title_slug[:128]}'
return name.replace('_', '-')
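    # For example (hypothetical requester and title): a requester with last name
    # "Smith" entering the title "A new BOF" gets a document name along the lines
    # of "bofreq-smith-a-new-bof" -- slugified, truncated, and with underscores
    # replaced by dashes.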
def clean_title(self):
title = self.cleaned_data['title']
name = self.name_from_title(title)
if name == self.name_from_title(''):
            raise forms.ValidationError('The filename derived from this title is empty. Please include a few descriptive words using ASCII or numeric characters')
if Document.objects.filter(name=name).exists():
raise forms.ValidationError('This title produces a filename already used by an existing BOF request')
return title
@login_required
def new_bof_request(request):
if request.method == 'POST':
form = NewBofreqForm(request.user.person, request.POST, request.FILES)
if form.is_valid():
title = form.cleaned_data['title']
name = form.name_from_title(title)
bofreq = Document.objects.create(
type_id='bofreq',
name = name,
title = title,
abstract = '',
rev = '00',
)
bofreq.set_state(State.objects.get(type_id='bofreq',slug='proposed'))
e1 = NewRevisionDocEvent.objects.create(
type="new_revision",
doc=bofreq,
by=request.user.person,
rev=bofreq.rev,
desc='New revision available',
time=bofreq.time,
)
e2 = BofreqEditorDocEvent.objects.create(
type="changed_editors",
doc=bofreq,
rev=bofreq.rev,
by=request.user.person,
desc= f'Editors changed to {request.user.person.name}',
)
e2.editors.set([request.user.person])
bofreq.save_with_history([e1,e2])
alias = DocAlias.objects.create(name=name)
alias.docs.set([bofreq])
bofreq_submission = form.cleaned_data['bofreq_submission']
if bofreq_submission == "upload":
content = get_cleaned_text_file_content(form.cleaned_data["bofreq_file"])
else:
content = form.cleaned_data['bofreq_content']
with io.open(bofreq.get_file_name(), 'w', encoding='utf-8') as destination:
destination.write(content)
email_bofreq_new_revision(request, bofreq)
return redirect('ietf.doc.views_doc.document_main', name=bofreq.name)
else:
init = {'bofreq_content':escape(render_to_string('doc/bofreq/bofreq_template.md',{})),
'bofreq_submission':'enter',
}
form = NewBofreqForm(request.user.person, initial=init)
return render(request, 'doc/bofreq/new_bofreq.html',
{'form':form})
class ChangeEditorsForm(forms.Form):
editors = SearchablePersonsField(required=False)
@login_required
def change_editors(request, name):
bofreq = get_object_or_404(Document, type="bofreq", name=name)
previous_editors = bofreq_editors(bofreq)
state_id = bofreq.get_state_slug('bofreq')
if not (has_role(request.user,('Secretariat', 'Area Director', 'IAB')) or (state_id=='proposed' and request.user.person in previous_editors)):
permission_denied(request,"You do not have permission to change this document's editors")
if request.method == 'POST':
form = ChangeEditorsForm(request.POST)
if form.is_valid():
new_editors = form.cleaned_data['editors']
if set(new_editors) != set(previous_editors):
e = BofreqEditorDocEvent(type="changed_editors", doc=bofreq, rev=bofreq.rev, by=request.user.person)
e.desc = f'Editors changed to {", ".join([p.name for p in new_editors])}'
e.save()
e.editors.set(new_editors)
bofreq.save_with_history([e])
email_bofreq_editors_changed(request, bofreq, previous_editors)
return redirect("ietf.doc.views_doc.document_main", name=bofreq.name)
else:
init = { "editors" : previous_editors }
form = ChangeEditorsForm(initial=init)
titletext = bofreq.get_base_name()
return render(request, 'doc/bofreq/change_editors.html',
{'form': form,
'doc': bofreq,
'titletext' : titletext,
},
)
class ChangeResponsibleForm(forms.Form):
responsible = SearchablePersonsField(required=False)
def clean_responsible(self):
responsible = self.cleaned_data['responsible']
not_leadership = list()
for person in responsible:
if not has_role(person.user, ('Area Director', 'IAB')):
not_leadership.append(person)
if not_leadership:
raise forms.ValidationError('Only current IAB and IESG members are allowed. Please remove: '+', '.join([person.plain_name() for person in not_leadership]))
return responsible
@login_required
def change_responsible(request,name):
if not has_role(request.user,('Secretariat', 'Area Director', 'IAB')):
permission_denied(request,"You do not have permission to change this document's responsible leadership")
bofreq = get_object_or_404(Document, type="bofreq", name=name)
previous_responsible = bofreq_responsible(bofreq)
if request.method == 'POST':
form = ChangeResponsibleForm(request.POST)
if form.is_valid():
new_responsible = form.cleaned_data['responsible']
if set(new_responsible) != set(previous_responsible):
e = BofreqResponsibleDocEvent(type="changed_responsible", doc=bofreq, rev=bofreq.rev, by=request.user.person)
e.desc = f'Responsible leadership changed to {", ".join([p.name for p in new_responsible])}'
e.save()
e.responsible.set(new_responsible)
bofreq.save_with_history([e])
email_bofreq_responsible_changed(request, bofreq, previous_responsible)
return redirect("ietf.doc.views_doc.document_main", name=bofreq.name)
else:
init = { "responsible" : previous_responsible }
form = ChangeResponsibleForm(initial=init)
titletext = bofreq.get_base_name()
return render(request, 'doc/bofreq/change_responsible.html',
{'form': form,
'doc': bofreq,
'titletext' : titletext,
},
)
class ChangeTitleForm(forms.Form):
title = forms.CharField(max_length=255, label="Title", required=True)
@login_required
def edit_title(request, name):
bofreq = get_object_or_404(Document, type="bofreq", name=name)
editors = bofreq_editors(bofreq)
state_id = bofreq.get_state_slug('bofreq')
if not (has_role(request.user,('Secretariat', 'Area Director', 'IAB')) or (state_id=='proposed' and request.user.person in editors)):
permission_denied(request, "You do not have permission to edit this document's title")
if request.method == 'POST':
form = ChangeTitleForm(request.POST)
if form.is_valid():
bofreq.title = form.cleaned_data['title']
c = DocEvent(type="added_comment", doc=bofreq, rev=bofreq.rev, by=request.user.person)
c.desc = "Title changed to '%s'"%bofreq.title
c.save()
bofreq.save_with_history([c])
email_bofreq_title_changed(request, bofreq)
return redirect("ietf.doc.views_doc.document_main", name=bofreq.name)
else:
init = { "title" : bofreq.title }
form = ChangeTitleForm(initial=init)
titletext = bofreq.get_base_name()
return render(request, 'doc/change_title.html',
{'form': form,
'doc': bofreq,
'titletext' : titletext,
},
)
class ChangeStateForm(forms.Form):
new_state = forms.ModelChoiceField(State.objects.filter(type="bofreq", used=True), label="BOF Request State", empty_label=None, required=True)
comment = forms.CharField(widget=forms.Textarea, help_text="Optional comment for the state change history entry.", required=False, strip=False)
@role_required('Area Director', 'Secretariat', 'IAB')
def change_state(request, name, option=None):
bofreq = get_object_or_404(Document, type="bofreq", name=name)
login = request.user.person
if request.method == 'POST':
form = ChangeStateForm(request.POST)
if form.is_valid():
clean = form.cleaned_data
new_state = clean['new_state']
comment = clean['comment'].rstrip()
if comment:
c = DocEvent(type="added_comment", doc=bofreq, rev=bofreq.rev, by=login)
c.desc = comment
c.save()
prev_state = bofreq.get_state()
if new_state != prev_state:
bofreq.set_state(new_state)
events = []
events.append(add_state_change_event(bofreq, login, prev_state, new_state))
bofreq.save_with_history(events)
return redirect('ietf.doc.views_doc.document_main', name=bofreq.name)
else:
s = bofreq.get_state()
init = dict(new_state=s.pk if s else None)
form = ChangeStateForm(initial=init)
return render(request, 'doc/change_state.html',
dict(form=form,
doc=bofreq,
login=login,
help_url=urlreverse('ietf.doc.views_help.state_help', kwargs=dict(type="bofreq")),
))
```
#### File: group/migrations/0054_enable_delegation.py
```python
from django.db import migrations
def forward(apps, schema_editor):
GroupFeatures = apps.get_model('group','GroupFeatures')
for type_id in ('dir', 'iabasg', 'program', 'review', 'team'):
f = GroupFeatures.objects.get(type_id=type_id)
if 'delegate' not in f.groupman_roles:
f.groupman_roles.append('delegate')
f.save()
for type_id in ('adhoc', 'ag', 'iesg', 'irtf', 'ise', 'rag', 'dir', 'iabasg', 'program', 'review'):
f = GroupFeatures.objects.get(type_id=type_id)
if 'delegate' not in f.default_used_roles:
f.default_used_roles.append('delegate')
f.save()
def reverse (apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('group', '0053_populate_groupfeatures_session_purposes'),
]
operations = [
migrations.RunPython(forward,reverse),
]
```
#### File: group/migrations/0055_editorial_stream.py
```python
from django.db import migrations
def forward(apps, schema_editor):
Group = apps.get_model('group', 'Group')
GroupFeatures = apps.get_model('group', 'GroupFeatures')
Group.objects.create(
acronym='editorial',
name='Editorial Stream',
state_id='active',
type_id='editorial',
parent=None,
)
templ = GroupFeatures.objects.get(type='rfcedtyp')
templ.pk = None
templ.type_id='editorial'
templ.save()
def reverse(apps, schema_editor):
Group = apps.get_model('group', 'Group')
GroupFeatures = apps.get_model('group', 'GroupFeatures')
GroupFeatures.objects.filter(type='editorial').delete()
Group.objects.filter(acronym='editorial').delete()
class Migration(migrations.Migration):
dependencies = [
('group', '0054_enable_delegation'),
('name', '0043_editorial_stream_grouptype'),
]
operations = [
migrations.RunPython(forward, reverse),
]
```
#### File: meeting/templatetags/editor_tags.py
```python
import debug # pyflakes: ignore
from django import template
from django.utils.html import format_html
register = template.Library()
@register.simple_tag
def constraint_icon_for(constraint_name, count=None):
# icons must be valid HTML and kept up to date with tests.EditorTagTests.test_constraint_icon_for()
icons = {
'conflict': '<span class="encircled">{reversed}1</span>',
'conflic2': '<span class="encircled">{reversed}2</span>',
'conflic3': '<span class="encircled">{reversed}3</span>',
'bethere': '<i class="bi bi-person"></i>{count}',
'timerange': '<i class="bi bi-calendar"></i>',
'time_relation': 'Δ',
'wg_adjacent': '{reversed}<i class="bi bi-skip-end"></i>',
'chair_conflict': '{reversed}<i class="bi bi-person-circle"></i>',
'tech_overlap': '{reversed}<i class="bi bi-link"></i>',
'key_participant': '{reversed}<i class="bi bi-key"></i>',
'joint_with_groups': '<i class="bi bi-merge"></i>',
'responsible_ad': '<span class="encircled">AD</span>',
}
reversed_suffix = '-reversed'
if constraint_name.slug.endswith(reversed_suffix):
reversed = True
cn = constraint_name.slug[: -len(reversed_suffix)]
else:
reversed = False
cn = constraint_name.slug
return format_html(
icons[cn],
count=count or '',
reversed='-' if reversed else '',
)
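# Example renderings (illustrative values, not from the test suite):
#   constraint_icon_for(ConstraintName(slug='bethere'), count=3)
#       -> '<i class="bi bi-person"></i>3'
#   constraint_icon_for(ConstraintName(slug='wg_adjacent-reversed'))
#       -> '-<i class="bi bi-skip-end"></i>'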
```
#### File: name/migrations/0043_editorial_stream_grouptype.py
```python
from django.db import migrations
def forward(apps, schema_editor):
GroupTypeName = apps.get_model('name', 'GroupTypeName')
GroupTypeName.objects.create(
slug = 'editorial',
name = 'Editorial',
desc = 'Editorial Stream Group',
used = True,
)
def reverse(apps, schema_editor):
GroupTypeName = apps.get_model('name', 'GroupTypeName')
GroupTypeName.objects.filter(slug='editorial').delete()
class Migration(migrations.Migration):
dependencies = [
('name', '0042_editorial_stream'),
]
operations = [
migrations.RunPython(forward, reverse),
]
```
#### File: ietf/release/tests.py
```python
from pyquery import PyQuery
from django.urls import reverse
import debug # pyflakes:ignore
from ietf.utils.test_utils import TestCase
class ReleasePagesTest(TestCase):
def test_about(self):
url = reverse('ietf.release.views.release')+"about"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
text = q('#content').text()
for word in ["About", "2.00", "3.00", "4.00", "5.0.0", "6.0.0", "7.0.0", "8.0.0"]:
self.assertIn(word, text)
```
#### File: secr/sreq/tests.py
```python
import datetime
from django.urls import reverse
import debug # pyflakes:ignore
from ietf.utils.test_utils import TestCase
from ietf.group.factories import GroupFactory, RoleFactory
from ietf.meeting.models import Session, ResourceAssociation, SchedulingEvent, Constraint
from ietf.meeting.factories import MeetingFactory, SessionFactory
from ietf.name.models import ConstraintName, TimerangeName
from ietf.person.models import Person
from ietf.secr.sreq.forms import SessionForm
from ietf.utils.mail import outbox, empty_outbox, get_payload_text
from pyquery import PyQuery
SECR_USER='secretary'
class SreqUrlTests(TestCase):
def test_urls(self):
MeetingFactory(type_id='ietf',date=datetime.date.today())
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.get("/secr/")
self.assertEqual(r.status_code, 200)
r = self.client.get("/secr/sreq/")
self.assertEqual(r.status_code, 200)
testgroup=GroupFactory()
r = self.client.get("/secr/sreq/%s/new/" % testgroup.acronym)
self.assertEqual(r.status_code, 200)
class SessionRequestTestCase(TestCase):
def test_main(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
SessionFactory.create_batch(2, meeting=meeting, status_id='sched')
SessionFactory.create_batch(2, meeting=meeting, status_id='disappr')
# An additional unscheduled group comes from make_immutable_base_data
url = reverse('ietf.secr.sreq.views.main')
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
sched = r.context['scheduled_groups']
self.assertEqual(len(sched), 2)
unsched = r.context['unscheduled_groups']
self.assertEqual(len(unsched), 8)
def test_approve(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
ad = Person.objects.get(user__username='ad')
area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
mars = GroupFactory(parent=area, acronym='mars')
# create session waiting for approval
session = SessionFactory(meeting=meeting, group=mars, status_id='apprw')
url = reverse('ietf.secr.sreq.views.approve', kwargs={'acronym':'mars'})
self.client.login(username="ad", password="<PASSWORD>")
r = self.client.get(url)
self.assertRedirects(r,reverse('ietf.secr.sreq.views.view', kwargs={'acronym':'mars'}))
self.assertEqual(SchedulingEvent.objects.filter(session=session).order_by('-id')[0].status_id, 'appr')
def test_cancel(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
ad = Person.objects.get(user__username='ad')
area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
session = SessionFactory(meeting=meeting, group__parent=area, group__acronym='mars', status_id='sched')
url = reverse('ietf.secr.sreq.views.cancel', kwargs={'acronym':'mars'})
self.client.login(username="ad", password="<PASSWORD>")
r = self.client.get(url)
self.assertRedirects(r,reverse('ietf.secr.sreq.views.main'))
self.assertEqual(SchedulingEvent.objects.filter(session=session).order_by('-id')[0].status_id, 'deleted')
def test_edit(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
mars = RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars').group
group2 = GroupFactory()
group3 = GroupFactory()
group4 = GroupFactory()
iabprog = GroupFactory(type_id='program')
SessionFactory(meeting=meeting,group=mars,status_id='sched')
url = reverse('ietf.secr.sreq.views.edit', kwargs={'acronym':'mars'})
self.client.login(username="marschairman", password="<PASSWORD>")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
attendees = 10
comments = 'need lights'
mars_sessions = meeting.session_set.filter(group__acronym='mars')
empty_outbox()
post_data = {'num_session':'2',
'attendees': attendees,
'constraint_chair_conflict':iabprog.acronym,
'session_time_relation': 'subsequent-days',
'adjacent_with_wg': group2.acronym,
'joint_with_groups': group3.acronym + ' ' + group4.acronym,
'joint_for_session': '2',
'timeranges': ['thursday-afternoon-early', 'thursday-afternoon-late'],
'session_set-TOTAL_FORMS': '3', # matches what view actually sends, even with only 2 filled in
'session_set-INITIAL_FORMS': '1',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
'session_set-0-id':mars_sessions[0].pk,
'session_set-0-name': mars_sessions[0].name,
'session_set-0-short': mars_sessions[0].short,
'session_set-0-purpose': mars_sessions[0].purpose_id,
'session_set-0-type': mars_sessions[0].type_id,
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': mars_sessions[0].on_agenda,
'session_set-0-remote_instructions': mars_sessions[0].remote_instructions,
'session_set-0-attendees': attendees,
'session_set-0-comments': comments,
'session_set-0-DELETE': '',
# no session_set-1-id because it's a new request
'session_set-1-name': '',
'session_set-1-short': '',
'session_set-1-purpose': 'regular',
'session_set-1-type': 'regular',
'session_set-1-requested_duration': '3600',
'session_set-1-on_agenda': True,
'session_set-1-remote_instructions': mars_sessions[0].remote_instructions,
'session_set-1-attendees': attendees,
'session_set-1-comments': comments,
'session_set-1-DELETE': '',
'session_set-2-id': '',
'session_set-2-name': '',
'session_set-2-short': '',
'session_set-2-purpose': 'regular',
'session_set-2-type': 'regular',
'session_set-2-requested_duration': '',
'session_set-2-on_agenda': 'True',
'session_set-2-attendees': attendees,
'session_set-2-comments': '',
'session_set-2-DELETE': 'on',
'submit': 'Continue'}
r = self.client.post(url, post_data, HTTP_HOST='example.com')
redirect_url = reverse('ietf.secr.sreq.views.view', kwargs={'acronym': 'mars'})
self.assertRedirects(r, redirect_url)
# Check whether updates were stored in the database
sessions = Session.objects.filter(meeting=meeting, group=mars)
self.assertEqual(len(sessions), 2)
session = sessions[0]
self.assertEqual(session.constraints().get(name='chair_conflict').target.acronym, iabprog.acronym)
self.assertEqual(session.constraints().get(name='time_relation').time_relation, 'subsequent-days')
self.assertEqual(session.constraints().get(name='wg_adjacent').target.acronym, group2.acronym)
self.assertEqual(
list(session.constraints().get(name='timerange').timeranges.all().values('name')),
list(TimerangeName.objects.filter(name__in=['thursday-afternoon-early', 'thursday-afternoon-late']).values('name'))
)
self.assertFalse(sessions[0].joint_with_groups.count())
self.assertEqual(list(sessions[1].joint_with_groups.all()), [group3, group4])
# Check whether the updated data is visible on the view page
r = self.client.get(redirect_url)
self.assertContains(r, 'Schedule the sessions on subsequent days')
self.assertContains(r, 'Thursday early afternoon, Thursday late afternoon')
self.assertContains(r, group2.acronym)
self.assertContains(r, 'Second session with: {} {}'.format(group3.acronym, group4.acronym))
# check that a notification was sent
self.assertEqual(len(outbox), 1)
notification_payload = get_payload_text(outbox[0])
self.assertIn('1 Hour, 1 Hour', notification_payload)
self.assertNotIn('1 Hour, 1 Hour, 1 Hour', notification_payload)
# Edit again, changing the joint sessions and clearing some fields. The behaviour of
# edit is different depending on whether previous joint sessions were recorded.
empty_outbox()
post_data = {'num_session':'2',
'attendees':attendees,
'constraint_chair_conflict':'',
'comments':'need lights',
'joint_with_groups': group2.acronym,
'joint_for_session': '1',
'session_set-TOTAL_FORMS': '3', # matches what view actually sends, even with only 2 filled in
'session_set-INITIAL_FORMS': '2',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
'session_set-0-id':sessions[0].pk,
'session_set-0-name': sessions[0].name,
'session_set-0-short': sessions[0].short,
'session_set-0-purpose': sessions[0].purpose_id,
'session_set-0-type': sessions[0].type_id,
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': sessions[0].on_agenda,
'session_set-0-remote_instructions': sessions[0].remote_instructions,
'session_set-0-attendees': sessions[0].attendees,
'session_set-0-comments': sessions[1].comments,
'session_set-0-DELETE': '',
'session_set-1-id': sessions[1].pk,
'session_set-1-name': sessions[1].name,
'session_set-1-short': sessions[1].short,
'session_set-1-purpose': sessions[1].purpose_id,
'session_set-1-type': sessions[1].type_id,
'session_set-1-requested_duration': '3600',
'session_set-1-on_agenda': sessions[1].on_agenda,
'session_set-1-remote_instructions': sessions[1].remote_instructions,
'session_set-1-attendees': sessions[1].attendees,
'session_set-1-comments': sessions[1].comments,
'session_set-1-DELETE': '',
'session_set-2-id': '',
'session_set-2-name': '',
'session_set-2-short': '',
'session_set-2-purpose': 'regular',
'session_set-2-type': 'regular',
'session_set-2-requested_duration': '',
'session_set-2-on_agenda': 'True',
'session_set-2-attendees': attendees,
'session_set-2-comments': '',
'session_set-2-DELETE': 'on',
'submit': 'Continue'}
r = self.client.post(url, post_data, HTTP_HOST='example.com')
self.assertRedirects(r, redirect_url)
# Check whether updates were stored in the database
sessions = Session.objects.filter(meeting=meeting, group=mars)
self.assertEqual(len(sessions), 2)
session = sessions[0]
self.assertFalse(session.constraints().filter(name='time_relation'))
self.assertFalse(session.constraints().filter(name='wg_adjacent'))
self.assertFalse(session.constraints().filter(name='timerange'))
self.assertEqual(list(sessions[0].joint_with_groups.all()), [group2])
self.assertFalse(sessions[1].joint_with_groups.count())
# check that a notification was sent
self.assertEqual(len(outbox), 1)
notification_payload = get_payload_text(outbox[0])
self.assertIn('1 Hour, 1 Hour', notification_payload)
self.assertNotIn('1 Hour, 1 Hour, 1 Hour', notification_payload)
# Check whether the updated data is visible on the view page
r = self.client.get(redirect_url)
self.assertContains(r, 'First session with: {}'.format(group2.acronym))
def test_edit_constraint_bethere(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
mars = RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars').group
session = SessionFactory(meeting=meeting, group=mars, status_id='sched')
Constraint.objects.create(
meeting=meeting,
source=mars,
person=Person.objects.get(user__username='marschairman'),
name_id='bethere',
)
self.assertEqual(session.people_constraints.count(), 1)
url = reverse('ietf.secr.sreq.views.edit', kwargs=dict(acronym='mars'))
self.client.login(username='marschairman', password='<PASSWORD>')
attendees = '10'
ad = Person.objects.get(user__username='ad')
post_data = {
'num_session': '1',
'attendees': attendees,
'bethere': str(ad.pk),
'constraint_chair_conflict':'',
'comments':'',
'joint_with_groups': '',
'joint_for_session': '',
'delete_conflict': 'on',
'session_set-TOTAL_FORMS': '3', # matches what view actually sends, even with only 2 filled in
'session_set-INITIAL_FORMS': '1',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
'session_set-0-id':session.pk,
'session_set-0-name': session.name,
'session_set-0-short': session.short,
'session_set-0-purpose': session.purpose_id,
'session_set-0-type': session.type_id,
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': session.on_agenda,
'session_set-0-remote_instructions': session.remote_instructions,
'session_set-0-attendees': attendees,
'session_set-0-comments': '',
'session_set-0-DELETE': '',
'session_set-1-id': '',
'session_set-1-name': '',
'session_set-1-short': '',
'session_set-1-purpose':'regular',
'session_set-1-type':'regular',
'session_set-1-requested_duration': '',
'session_set-1-on_agenda': 'True',
'session_set-1-attendees': attendees,
'session_set-1-comments': '',
'session_set-1-DELETE': 'on',
'session_set-2-id': '',
'session_set-2-name': '',
'session_set-2-short': '',
'session_set-2-purpose': 'regular',
'session_set-2-type': 'regular',
'session_set-2-requested_duration': '',
'session_set-2-on_agenda': 'True',
'session_set-2-attendees': attendees,
'session_set-2-comments': '',
'session_set-2-DELETE': 'on',
'submit': 'Save',
}
r = self.client.post(url, post_data, HTTP_HOST='example.com')
redirect_url = reverse('ietf.secr.sreq.views.view', kwargs={'acronym': 'mars'})
self.assertRedirects(r, redirect_url)
self.assertEqual([pc.person for pc in session.people_constraints.all()], [ad])
def test_edit_inactive_conflicts(self):
"""Inactive conflicts should be displayed and removable"""
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today(), group_conflicts=['chair_conflict'])
mars = RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars').group
session = SessionFactory(meeting=meeting, group=mars, status_id='sched')
other_group = GroupFactory()
Constraint.objects.create(
meeting=meeting,
name_id='conflict', # not in group_conflicts for the meeting
source=mars,
target=other_group,
)
url = reverse('ietf.secr.sreq.views.edit', kwargs=dict(acronym='mars'))
self.client.login(username='marschairman', password='<PASSWORD>')
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
        # check that the inactive conflict is displayed
found = q('input#id_delete_conflict[type="checkbox"]')
self.assertEqual(len(found), 1)
delete_checkbox = found[0]
# check that the label on the checkbox is correct
self.assertIn('Delete this conflict', delete_checkbox.tail)
# check that the target is displayed correctly in the UI
self.assertIn(other_group.acronym, delete_checkbox.find('../input[@type="text"]').value)
attendees = '10'
post_data = {
'num_session': '1',
'attendees': attendees,
'constraint_chair_conflict':'',
'comments':'',
'joint_with_groups': '',
'joint_for_session': '',
'delete_conflict': 'on',
'session_set-TOTAL_FORMS': '1',
'session_set-INITIAL_FORMS': '1',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
'session_set-0-id':session.pk,
'session_set-0-name': session.name,
'session_set-0-short': session.short,
'session_set-0-purpose': session.purpose_id,
'session_set-0-type': session.type_id,
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': session.on_agenda,
'session_set-0-remote_instructions': session.remote_instructions,
'session_set-0-attendees': attendees,
'session_set-0-comments': '',
'session_set-0-DELETE': '',
'submit': 'Save',
}
r = self.client.post(url, post_data, HTTP_HOST='example.com')
redirect_url = reverse('ietf.secr.sreq.views.view', kwargs={'acronym': 'mars'})
self.assertRedirects(r, redirect_url)
self.assertEqual(len(mars.constraint_source_set.filter(name_id='conflict')), 0)
def test_tool_status(self):
MeetingFactory(type_id='ietf', date=datetime.date.today())
url = reverse('ietf.secr.sreq.views.tool_status')
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
r = self.client.post(url, {'message':'locked', 'submit':'Lock'})
self.assertRedirects(r,reverse('ietf.secr.sreq.views.main'))
def test_new_req_constraint_types(self):
"""Configurable constraint types should be handled correctly in a new request
Relies on SessionForm representing constraint values with element IDs
like id_constraint_<ConstraintName slug>
"""
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars')
url = reverse('ietf.secr.sreq.views.new', kwargs=dict(acronym='mars'))
self.client.login(username="marschairman", password="<PASSWORD>")
for expected in [
['conflict', 'conflic2', 'conflic3'],
['chair_conflict', 'tech_overlap', 'key_participant'],
]:
meeting.group_conflict_types.clear()
for slug in expected:
meeting.group_conflict_types.add(ConstraintName.objects.get(slug=slug))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertCountEqual(
[elt.attr('id') for elt in q.items('*[id^=id_constraint_]')],
['id_constraint_{}'.format(conf_name) for conf_name in expected],
)
def test_edit_req_constraint_types(self):
"""Editing a request constraint should show the expected constraints"""
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
SessionFactory(group__acronym='mars',
status_id='schedw',
meeting=meeting,
add_to_schedule=False)
RoleFactory(name_id='chair', person__user__username='marschairman', group__acronym='mars')
url = reverse('ietf.secr.sreq.views.edit', kwargs=dict(acronym='mars'))
self.client.login(username='marschairman', password='<PASSWORD>')
for expected in [
['conflict', 'conflic2', 'conflic3'],
['chair_conflict', 'tech_overlap', 'key_participant'],
]:
meeting.group_conflict_types.clear()
for slug in expected:
meeting.group_conflict_types.add(ConstraintName.objects.get(slug=slug))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertCountEqual(
[elt.attr('id') for elt in q.items('*[id^=id_constraint_]')],
['id_constraint_{}'.format(conf_name) for conf_name in expected],
)
class SubmitRequestCase(TestCase):
def setUp(self):
super(SubmitRequestCase, self).setUp()
# Ensure meeting numbers are predictable. Temporarily needed while basing
# constraint types on meeting number, expected to go away when #2770 is resolved.
MeetingFactory.reset_sequence(0)
def test_submit_request(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
ad = Person.objects.get(user__username='ad')
area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
group = GroupFactory(parent=area)
group2 = GroupFactory(parent=area)
group3 = GroupFactory(parent=area)
group4 = GroupFactory(parent=area)
session_count_before = Session.objects.filter(meeting=meeting, group=group).count()
url = reverse('ietf.secr.sreq.views.new',kwargs={'acronym':group.acronym})
confirm_url = reverse('ietf.secr.sreq.views.confirm',kwargs={'acronym':group.acronym})
main_url = reverse('ietf.secr.sreq.views.main')
attendees = '10'
comments = 'need projector'
post_data = {'num_session':'1',
'attendees':attendees,
'constraint_chair_conflict':'',
'comments':comments,
'adjacent_with_wg': group2.acronym,
'timeranges': ['thursday-afternoon-early', 'thursday-afternoon-late'],
'joint_with_groups': group3.acronym + ' ' + group4.acronym,
'joint_for_session': '1',
'session_set-TOTAL_FORMS': '1',
'session_set-INITIAL_FORMS': '0',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
# no 'session_set-0-id' to create a new session
'session_set-0-name': '',
'session_set-0-short': '',
'session_set-0-purpose': 'regular',
'session_set-0-type': 'regular',
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': True,
'session_set-0-remote_instructions': '',
'session_set-0-attendees': attendees,
'session_set-0-comments': comments,
'session_set-0-DELETE': '',
'submit': 'Continue'}
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.post(url,post_data)
self.assertEqual(r.status_code, 200)
# Verify the contents of the confirm view
self.assertContains(r, 'Thursday early afternoon, Thursday late afternoon')
self.assertContains(r, group2.acronym)
self.assertContains(r, 'First session with: {} {}'.format(group3.acronym, group4.acronym))
post_data['submit'] = 'Submit'
r = self.client.post(confirm_url,post_data)
self.assertRedirects(r, main_url)
session_count_after = Session.objects.filter(meeting=meeting, group=group, type='regular').count()
self.assertEqual(session_count_after, session_count_before + 1)
# test that second confirm does not add sessions
r = self.client.post(confirm_url,post_data)
self.assertRedirects(r, main_url)
session_count_after = Session.objects.filter(meeting=meeting, group=group, type='regular').count()
self.assertEqual(session_count_after, session_count_before + 1)
# Verify database content
session = Session.objects.get(meeting=meeting, group=group)
self.assertEqual(session.constraints().get(name='wg_adjacent').target.acronym, group2.acronym)
self.assertEqual(
list(session.constraints().get(name='timerange').timeranges.all().values('name')),
list(TimerangeName.objects.filter(name__in=['thursday-afternoon-early', 'thursday-afternoon-late']).values('name'))
)
self.assertEqual(list(session.joint_with_groups.all()), [group3, group4])
def test_submit_request_invalid(self):
MeetingFactory(type_id='ietf', date=datetime.date.today())
ad = Person.objects.get(user__username='ad')
area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
group = GroupFactory(parent=area)
url = reverse('ietf.secr.sreq.views.new',kwargs={'acronym':group.acronym})
attendees = '10'
comments = 'need projector'
post_data = {
'num_session':'2',
'attendees':attendees,
'constraint_chair_conflict':'',
'comments':comments,
'session_set-TOTAL_FORMS': '1',
'session_set-INITIAL_FORMS': '1',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
# no 'session_set-0-id' to create a new session
'session_set-0-name': '',
'session_set-0-short': '',
'session_set-0-purpose': 'regular',
'session_set-0-type': 'regular',
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': True,
'session_set-0-remote_instructions': '',
'session_set-0-attendees': attendees,
'session_set-0-comments': comments,
'session_set-0-DELETE': '',
}
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.post(url,post_data)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#session-request-form')),1)
self.assertContains(r, 'Must provide data for all sessions')
def test_submit_request_check_constraints(self):
m1 = MeetingFactory(type_id='ietf', date=datetime.date.today() - datetime.timedelta(days=100))
MeetingFactory(type_id='ietf', date=datetime.date.today(),
group_conflicts=['chair_conflict', 'conflic2', 'conflic3'])
ad = Person.objects.get(user__username='ad')
area = RoleFactory(name_id='ad', person=ad, group__type_id='area').group
group = GroupFactory(parent=area)
still_active_group = GroupFactory(parent=area)
Constraint.objects.create(
meeting=m1,
source=group,
target=still_active_group,
name_id='chair_conflict',
)
inactive_group = GroupFactory(parent=area, state_id='conclude')
inactive_group.save()
Constraint.objects.create(
meeting=m1,
source=group,
target=inactive_group,
name_id='chair_conflict',
)
session = SessionFactory(group=group, meeting=m1)
self.client.login(username="secretary", password="<PASSWORD>")
url = reverse('ietf.secr.sreq.views.new',kwargs={'acronym':group.acronym})
r = self.client.get(url + '?previous')
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
conflict1 = q('[name="constraint_chair_conflict"]').val()
self.assertIn(still_active_group.acronym, conflict1)
self.assertNotIn(inactive_group.acronym, conflict1)
attendees = '10'
comments = 'need projector'
post_data = {'num_session':'1',
'attendees':attendees,
'constraint_chair_conflict': group.acronym,
'comments':comments,
'session_set-TOTAL_FORMS': '1',
'session_set-INITIAL_FORMS': '1',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
# no 'session_set-0-id' to create a new session
'session_set-0-name': '',
'session_set-0-short': '',
'session_set-0-purpose': session.purpose_id,
'session_set-0-type': session.type_id,
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': session.on_agenda,
'session_set-0-remote_instructions': session.remote_instructions,
'session_set-0-attendees': attendees,
'session_set-0-comments': comments,
'session_set-0-DELETE': '',
'submit': 'Continue'}
r = self.client.post(url,post_data)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#session-request-form')),1)
self.assertContains(r, "Cannot declare a conflict with the same group")
def test_request_notification(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
ad = Person.objects.get(user__username='ad')
area = GroupFactory(type_id='area')
RoleFactory(name_id='ad', person=ad, group=area)
group = GroupFactory(acronym='ames', parent=area)
group2 = GroupFactory(acronym='ames2', parent=area)
group3 = GroupFactory(acronym='ames2', parent=area)
group4 = GroupFactory(acronym='ames3', parent=area)
RoleFactory(name_id='chair', group=group, person__user__username='ameschairman')
resource = ResourceAssociation.objects.create(name_id='project')
# Bit of a test data hack - the fixture now has no used resources to pick from
resource.name.used=True
resource.name.save()
url = reverse('ietf.secr.sreq.views.new',kwargs={'acronym':group.acronym})
confirm_url = reverse('ietf.secr.sreq.views.confirm',kwargs={'acronym':group.acronym})
len_before = len(outbox)
attendees = '10'
post_data = {'num_session':'2',
'attendees':attendees,
'bethere':str(ad.pk),
'constraint_chair_conflict':group4.acronym,
'comments':'',
'resources': resource.pk,
'session_time_relation': 'subsequent-days',
'adjacent_with_wg': group2.acronym,
'joint_with_groups': group3.acronym,
'joint_for_session': '2',
'timeranges': ['thursday-afternoon-early', 'thursday-afternoon-late'],
'session_set-TOTAL_FORMS': '2',
'session_set-INITIAL_FORMS': '0',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
# no 'session_set-0-id' for new session
'session_set-0-name': '',
'session_set-0-short': '',
'session_set-0-purpose': 'regular',
'session_set-0-type': 'regular',
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': True,
'session_set-0-remote_instructions': '',
'session_set-0-attendees': attendees,
'session_set-0-comments': '',
'session_set-0-DELETE': '',
# no 'session_set-1-id' for new session
'session_set-1-name': '',
'session_set-1-short': '',
'session_set-1-purpose': 'regular',
'session_set-1-type': 'regular',
'session_set-1-requested_duration': '3600',
'session_set-1-on_agenda': True,
'session_set-1-remote_instructions': '',
'session_set-1-attendees': attendees,
'session_set-1-comments': '',
'session_set-1-DELETE': '',
'submit': 'Continue'}
self.client.login(username="ameschairman", password="<PASSWORD>")
# submit
r = self.client.post(url,post_data)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue('Confirm' in str(q("title")), r.context['form'].errors)
# confirm
post_data['submit'] = 'Submit'
r = self.client.post(confirm_url,post_data)
self.assertRedirects(r, reverse('ietf.secr.sreq.views.main'))
self.assertEqual(len(outbox),len_before+1)
notification = outbox[-1]
notification_payload = get_payload_text(notification)
sessions = Session.objects.filter(meeting=meeting,group=group)
self.assertEqual(len(sessions), 2)
session = sessions[0]
self.assertEqual(session.resources.count(),1)
self.assertEqual(session.people_constraints.count(),1)
self.assertEqual(session.constraints().get(name='time_relation').time_relation, 'subsequent-days')
self.assertEqual(session.constraints().get(name='wg_adjacent').target.acronym, group2.acronym)
self.assertEqual(
list(session.constraints().get(name='timerange').timeranges.all().values('name')),
list(TimerangeName.objects.filter(name__in=['thursday-afternoon-early', 'thursday-afternoon-late']).values('name'))
)
resource = session.resources.first()
self.assertTrue(resource.desc in notification_payload)
self.assertTrue('Schedule the sessions on subsequent days' in notification_payload)
self.assertTrue(group2.acronym in notification_payload)
self.assertTrue("Can't meet: Thursday early afternoon, Thursday late" in notification_payload)
self.assertTrue('Second session joint with: {}'.format(group3.acronym) in notification_payload)
self.assertTrue(ad.ascii_name() in notification_payload)
self.assertIn(ConstraintName.objects.get(slug='chair_conflict').name, notification_payload)
self.assertIn(group.acronym, notification_payload)
self.assertIn('1 Hour, 1 Hour', notification_payload)
self.assertNotIn('1 Hour, 1 Hour, 1 Hour', notification_payload)
self.assertNotIn('The third session requires your approval', notification_payload)
def test_request_notification_third_session(self):
meeting = MeetingFactory(type_id='ietf', date=datetime.date.today())
ad = Person.objects.get(user__username='ad')
area = GroupFactory(type_id='area')
RoleFactory(name_id='ad', person=ad, group=area)
group = GroupFactory(acronym='ames', parent=area)
group2 = GroupFactory(acronym='ames2', parent=area)
group3 = GroupFactory(acronym='ames2', parent=area)
group4 = GroupFactory(acronym='ames3', parent=area)
RoleFactory(name_id='chair', group=group, person__user__username='ameschairman')
resource = ResourceAssociation.objects.create(name_id='project')
# Bit of a test data hack - the fixture now has no used resources to pick from
resource.name.used=True
resource.name.save()
url = reverse('ietf.secr.sreq.views.new',kwargs={'acronym':group.acronym})
confirm_url = reverse('ietf.secr.sreq.views.confirm',kwargs={'acronym':group.acronym})
len_before = len(outbox)
attendees = '10'
post_data = {'num_session':'2',
'third_session': 'true',
'attendees':attendees,
'bethere':str(ad.pk),
'constraint_chair_conflict':group4.acronym,
'comments':'',
'resources': resource.pk,
'session_time_relation': 'subsequent-days',
'adjacent_with_wg': group2.acronym,
'joint_with_groups': group3.acronym,
'joint_for_session': '2',
'timeranges': ['thursday-afternoon-early', 'thursday-afternoon-late'],
'session_set-TOTAL_FORMS': '3',
'session_set-INITIAL_FORMS': '0',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
# no 'session_set-0-id' for new session
'session_set-0-name': '',
'session_set-0-short': '',
'session_set-0-purpose': 'regular',
'session_set-0-type': 'regular',
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': True,
'session_set-0-remote_instructions': '',
'session_set-0-attendees': attendees,
'session_set-0-comments': '',
'session_set-0-DELETE': '',
# no 'session_set-1-id' for new session
'session_set-1-name': '',
'session_set-1-short': '',
'session_set-1-purpose': 'regular',
'session_set-1-type': 'regular',
'session_set-1-requested_duration': '3600',
'session_set-1-on_agenda': True,
'session_set-1-remote_instructions': '',
'session_set-1-attendees': attendees,
'session_set-1-comments': '',
'session_set-1-DELETE': '',
# no 'session_set-2-id' for new session
'session_set-2-name': '',
'session_set-2-short': '',
'session_set-2-purpose': 'regular',
'session_set-2-type': 'regular',
'session_set-2-requested_duration': '3600',
'session_set-2-on_agenda': True,
'session_set-2-remote_instructions': '',
'session_set-2-attendees': attendees,
'session_set-2-comments': '',
'session_set-2-DELETE': '',
'submit': 'Continue'}
self.client.login(username="ameschairman", password="<PASSWORD>")
# submit
r = self.client.post(url,post_data)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue('Confirm' in str(q("title")), r.context['form'].errors)
# confirm
post_data['submit'] = 'Submit'
r = self.client.post(confirm_url,post_data)
self.assertRedirects(r, reverse('ietf.secr.sreq.views.main'))
self.assertEqual(len(outbox),len_before+1)
notification = outbox[-1]
notification_payload = get_payload_text(notification)
sessions = Session.objects.filter(meeting=meeting,group=group)
self.assertEqual(len(sessions), 3)
session = sessions[0]
self.assertEqual(session.resources.count(),1)
self.assertEqual(session.people_constraints.count(),1)
self.assertEqual(session.constraints().get(name='time_relation').time_relation, 'subsequent-days')
self.assertEqual(session.constraints().get(name='wg_adjacent').target.acronym, group2.acronym)
self.assertEqual(
list(session.constraints().get(name='timerange').timeranges.all().values('name')),
list(TimerangeName.objects.filter(name__in=['thursday-afternoon-early', 'thursday-afternoon-late']).values('name'))
)
resource = session.resources.first()
self.assertTrue(resource.desc in notification_payload)
self.assertTrue('Schedule the sessions on subsequent days' in notification_payload)
self.assertTrue(group2.acronym in notification_payload)
self.assertTrue("Can't meet: Thursday early afternoon, Thursday late" in notification_payload)
self.assertTrue('Second session joint with: {}'.format(group3.acronym) in notification_payload)
self.assertTrue(ad.ascii_name() in notification_payload)
self.assertIn(ConstraintName.objects.get(slug='chair_conflict').name, notification_payload)
self.assertIn(group.acronym, notification_payload)
self.assertIn('1 Hour, 1 Hour, 1 Hour', notification_payload)
self.assertIn('The third session requires your approval', notification_payload)
class LockAppTestCase(TestCase):
def setUp(self):
super().setUp()
self.meeting = MeetingFactory(type_id='ietf', date=datetime.date.today(),session_request_lock_message='locked')
self.group = GroupFactory(acronym='mars')
RoleFactory(name_id='chair', group=self.group, person__user__username='marschairman')
SessionFactory(group=self.group,meeting=self.meeting)
def test_edit_request(self):
url = reverse('ietf.secr.sreq.views.edit',kwargs={'acronym':self.group.acronym})
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q(':disabled[name="submit"]')), 0)
chair = self.group.role_set.filter(name_id='chair').first().person.user.username
        self.client.login(username=chair, password='<PASSWORD>')
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q(':disabled[name="submit"]')), 1)
def test_view_request(self):
url = reverse('ietf.secr.sreq.views.view',kwargs={'acronym':self.group.acronym})
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.get(url,follow=True)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q(':disabled[name="edit"]')), 1)
def test_new_request(self):
url = reverse('ietf.secr.sreq.views.new',kwargs={'acronym':self.group.acronym})
# try as WG Chair
self.client.login(username="marschairman", password="<PASSWORD>")
r = self.client.get(url, follow=True)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#session-request-form')),0)
# try as Secretariat
self.client.login(username="secretary", password="<PASSWORD>")
r = self.client.get(url,follow=True)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#session-request-form')),1)
class NotMeetingCase(TestCase):
def test_not_meeting(self):
MeetingFactory(type_id='ietf',date=datetime.date.today())
group = GroupFactory(acronym='mars')
url = reverse('ietf.secr.sreq.views.no_session',kwargs={'acronym':group.acronym})
self.client.login(username="secretary", password="<PASSWORD>")
empty_outbox()
r = self.client.get(url,follow=True)
# If the view invoked by that get throws an exception (such as an integrity error),
# the traceback from this test will talk about a TransactionManagementError and
# yell about executing queries before the end of an 'atomic' block
# This is a sign of a problem - a get shouldn't have a side-effect like this one does
self.assertEqual(r.status_code, 200)
self.assertContains(r, 'A message was sent to notify not having a session')
r = self.client.get(url,follow=True)
self.assertEqual(r.status_code, 200)
self.assertContains(r, 'is already marked as not meeting')
self.assertEqual(len(outbox),1)
self.assertTrue('Not having a session' in outbox[0]['Subject'])
self.assertTrue('session-request@' in outbox[0]['To'])
class RetrievePreviousCase(TestCase):
pass
# test error if already scheduled
# test get previous exists/doesn't exist
# test that groups scheduled and unscheduled add up to total groups
# test access by unauthorized
class SessionFormTest(TestCase):
def setUp(self):
super().setUp()
self.meeting = MeetingFactory(type_id='ietf')
self.group1 = GroupFactory()
self.group2 = GroupFactory()
self.group3 = GroupFactory()
self.group4 = GroupFactory()
self.group5 = GroupFactory()
self.group6 = GroupFactory()
attendees = '10'
comments = 'need lights'
self.valid_form_data = {
'num_session': '2',
'third_session': 'true',
'attendees': attendees,
'constraint_chair_conflict': self.group2.acronym,
'constraint_tech_overlap': self.group3.acronym,
'constraint_key_participant': self.group4.acronym,
'comments': comments,
'session_time_relation': 'subsequent-days',
'adjacent_with_wg': self.group5.acronym,
'joint_with_groups': self.group6.acronym,
'joint_for_session': '3',
'timeranges': ['thursday-afternoon-early', 'thursday-afternoon-late'],
'submit': 'Continue',
'session_set-TOTAL_FORMS': '3',
'session_set-INITIAL_FORMS': '0',
'session_set-MIN_NUM_FORMS': '1',
'session_set-MAX_NUM_FORMS': '3',
# no 'session_set-0-id' for new session
'session_set-0-name': '',
'session_set-0-short': '',
'session_set-0-purpose': 'regular',
'session_set-0-type': 'regular',
'session_set-0-requested_duration': '3600',
'session_set-0-on_agenda': True,
'session_set-0-remote_instructions': '',
'session_set-0-attendees': attendees,
'session_set-0-comments': '',
'session_set-0-DELETE': '',
# no 'session_set-1-id' for new session
'session_set-1-name': '',
'session_set-1-short': '',
'session_set-1-purpose': 'regular',
'session_set-1-type': 'regular',
'session_set-1-requested_duration': '3600',
'session_set-1-on_agenda': True,
'session_set-1-remote_instructions': '',
'session_set-1-attendees': attendees,
'session_set-1-comments': '',
'session_set-1-DELETE': '',
# no 'session_set-2-id' for new session
'session_set-2-name': '',
'session_set-2-short': '',
'session_set-2-purpose': 'regular',
'session_set-2-type': 'regular',
'session_set-2-requested_duration': '3600',
'session_set-2-on_agenda': True,
'session_set-2-remote_instructions': '',
'session_set-2-attendees': attendees,
'session_set-2-comments': '',
'session_set-2-DELETE': '',
}
def test_valid(self):
# Test with three sessions
form = SessionForm(data=self.valid_form_data, group=self.group1, meeting=self.meeting)
self.assertTrue(form.is_valid())
# Test with two sessions
self.valid_form_data.update({
'third_session': '',
'session_set-TOTAL_FORMS': '2',
'joint_for_session': '2'
})
form = SessionForm(data=self.valid_form_data, group=self.group1, meeting=self.meeting)
self.assertTrue(form.is_valid())
# Test with one session
self.valid_form_data.update({
'num_session': 1,
'session_set-TOTAL_FORMS': '1',
'joint_for_session': '1',
'session_time_relation': '',
})
form = SessionForm(data=self.valid_form_data, group=self.group1, meeting=self.meeting)
self.assertTrue(form.is_valid())
def test_invalid_groups(self):
new_form_data = {
'constraint_chair_conflict': 'doesnotexist',
'constraint_tech_overlap': 'doesnotexist',
'constraint_key_participant': 'doesnotexist',
'adjacent_with_wg': 'doesnotexist',
'joint_with_groups': 'doesnotexist',
}
form = self._invalid_test_helper(new_form_data)
self.assertEqual(set(form.errors.keys()), set(new_form_data.keys()))
def test_valid_group_appears_in_multiple_conflicts(self):
"""Some conflict types allow overlapping groups"""
new_form_data = {
'constraint_chair_conflict': self.group2.acronym,
'constraint_tech_overlap': self.group2.acronym,
}
self.valid_form_data.update(new_form_data)
form = SessionForm(data=self.valid_form_data, group=self.group1, meeting=self.meeting)
self.assertTrue(form.is_valid())
def test_invalid_group_appears_in_multiple_conflicts(self):
"""Some conflict types do not allow overlapping groups"""
self.meeting.group_conflict_types.clear()
self.meeting.group_conflict_types.add(ConstraintName.objects.get(slug='conflict'))
self.meeting.group_conflict_types.add(ConstraintName.objects.get(slug='conflic2'))
new_form_data = {
'constraint_conflict': self.group2.acronym,
'constraint_conflic2': self.group2.acronym,
}
form = self._invalid_test_helper(new_form_data)
self.assertEqual(form.non_field_errors(), ['%s appears in conflicts more than once' % self.group2.acronym])
def test_invalid_conflict_with_self(self):
new_form_data = {
'constraint_chair_conflict': self.group1.acronym,
}
self._invalid_test_helper(new_form_data)
def test_invalid_session_time_relation(self):
form = self._invalid_test_helper({
'third_session': '',
'session_set-TOTAL_FORMS': 1,
'num_session': 1,
'joint_for_session': '1',
})
self.assertEqual(form.errors,
{
'session_time_relation': ['Time between sessions can only be used when two '
'sessions are requested.']
})
def test_invalid_joint_for_session(self):
form = self._invalid_test_helper({
'third_session': '',
'session_set-TOTAL_FORMS': '2',
'num_session': 2,
'joint_for_session': '3',
})
self.assertEqual(form.errors,
{
'joint_for_session': [
'Session 3 can not be the joint session, the session has not been requested.']
})
form = self._invalid_test_helper({
'third_session': '',
'session_set-TOTAL_FORMS': '1',
'num_session': 1,
'joint_for_session': '2',
'session_time_relation': '',
})
self.assertEqual(form.errors,
{
'joint_for_session': [
'Session 2 can not be the joint session, the session has not been requested.']
})
def test_invalid_missing_session_length(self):
form = self._invalid_test_helper({
'session_set-TOTAL_FORMS': '2',
'session_set-1-requested_duration': '',
'third_session': 'false',
'joint_for_session': None,
})
self.assertEqual(form.session_forms.errors,
[
{},
{'requested_duration': ['This field is required.']},
])
form = self._invalid_test_helper({
'session_set-1-requested_duration': '',
'session_set-2-requested_duration': '',
'joint_for_session': None,
})
self.assertEqual(
form.session_forms.errors,
[
{},
{'requested_duration': ['This field is required.']},
{'requested_duration': ['This field is required.']},
])
form = self._invalid_test_helper({
'session_set-2-requested_duration': '',
'joint_for_session': None,
})
self.assertEqual(form.session_forms.errors,
[
{},
{},
{'requested_duration': ['This field is required.']},
])
def _invalid_test_helper(self, new_form_data):
form_data = dict(self.valid_form_data, **new_form_data)
form = SessionForm(data=form_data, group=self.group1, meeting=self.meeting)
self.assertFalse(form.is_valid())
return form
```
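The post_data dictionaries in these tests repeat Django's formset management-form fields for the `session_set` prefix. A hypothetical helper (not part of the test suite above) sketches the four keys every such submission needs:
```python
# Hypothetical helper, not in the test suite above: builds the management-form
# keys Django formsets require for the 'session_set' prefix used by SessionForm.
def session_set_management_form(total, initial=0, minimum=1, maximum=3):
    return {
        'session_set-TOTAL_FORMS': str(total),
        'session_set-INITIAL_FORMS': str(initial),
        'session_set-MIN_NUM_FORMS': str(minimum),
        'session_set-MAX_NUM_FORMS': str(maximum),
    }

# Example: a two-session request would update post_data with
# session_set_management_form(total=2, initial=0).
```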
#### File: ietf/utils/bootstrap.py
```python
import django_bootstrap5.renderers
class SeparateErrorsFromHelpTextFieldRenderer(django_bootstrap5.renderers.FieldRenderer):
def append_to_field(self, html):
if self.field_help:
html += '<div class="form-text">{}</div>'.format(self.field_help)
for e in self.field_errors:
html += '<div class="alert alert-danger my-3">{}</div>'.format(e)
return html
```
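The renderer above appends help text and field errors as separate elements rather than django-bootstrap5's combined default. A hedged sketch of how such a renderer is typically registered; the settings snippet below is an assumption for illustration, not copied from this repository's settings:
```python
# Assumed wiring (illustrative): django-bootstrap5 resolves field renderers
# through the BOOTSTRAP5 setting, so the class above would be registered
# roughly like this in the Django settings module.
BOOTSTRAP5 = {
    "field_renderers": {
        "default": "ietf.utils.bootstrap.SeparateErrorsFromHelpTextFieldRenderer",
    },
}
```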
#### File: ietf/utils/tests_meetecho.py
```python
import datetime
import requests
import requests_mock
from pytz import timezone, utc
from unittest.mock import patch
from urllib.parse import urljoin
from django.conf import settings
from django.test import override_settings
from ietf.utils.tests import TestCase
from .meetecho import Conference, ConferenceManager, MeetechoAPI, MeetechoAPIError
API_BASE = 'https://meetecho-api.example.com'
CLIENT_ID = 'datatracker'
CLIENT_SECRET = '<PASSWORD>'
API_CONFIG={
'api_base': API_BASE,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
}
@override_settings(MEETECHO_API_CONFIG=API_CONFIG)
class APITests(TestCase):
retrieve_token_url = urljoin(API_BASE, 'auth/ietfservice/tokens')
schedule_meeting_url = urljoin(API_BASE, 'meeting/interim/createRoom')
fetch_meetings_url = urljoin(API_BASE, 'meeting/interim/fetchRooms')
delete_meetings_url = urljoin(API_BASE, 'meeting/interim/deleteRoom')
def setUp(self):
super().setUp()
self.requests_mock = requests_mock.Mocker()
self.requests_mock.start()
def tearDown(self):
self.requests_mock.stop()
super().tearDown()
def test_retrieve_wg_tokens(self):
data_to_fetch = {
'tokens': {
'acro': 'wg-token-value-for-acro',
'beta': 'different-token',
'gamma': 'this-is-not-the-same',
}
}
self.requests_mock.post(self.retrieve_token_url, status_code=200, json=data_to_fetch)
api = MeetechoAPI(API_BASE, CLIENT_ID, CLIENT_SECRET)
api_response = api.retrieve_wg_tokens(['acro', 'beta', 'gamma'])
self.assertTrue(self.requests_mock.called)
request = self.requests_mock.last_request
self.assertEqual(
request.headers['Content-Type'],
'application/json',
'Incorrect request content-type',
)
self.assertEqual(
request.json(),
{
'client': CLIENT_ID,
'secret': CLIENT_SECRET,
'wgs': ['acro', 'beta', 'gamma'],
}
)
self.assertEqual(api_response, data_to_fetch)
def test_schedule_meeting(self):
self.requests_mock.post(
self.schedule_meeting_url,
status_code=200,
json={
'rooms': {
'3d55bce0-535e-4ba8-bb8e-734911cf3c32': {
'room': {
'id': 18,
'start_time': '2021-09-14 10:00:00',
'duration': 130,
'description': 'interim-2021-wgname-01',
},
'url': 'https://meetings.conf.meetecho.com/interim/?short=3d55bce0-535e-4ba8-bb8e-734911cf3c32',
'deletion_token': 'session-deletion-token',
},
}
},
)
api = MeetechoAPI(API_BASE, CLIENT_ID, CLIENT_SECRET)
api_response = api.schedule_meeting(
wg_token='<PASSWORD>',
start_time=utc.localize(datetime.datetime(2021, 9, 14, 10, 0, 0)),
duration=datetime.timedelta(minutes=130),
description='interim-2021-wgname-01',
extrainfo='message for staff',
)
self.assertTrue(self.requests_mock.called)
request = self.requests_mock.last_request
self.assertIn('Authorization', request.headers)
self.assertEqual(
request.headers['Content-Type'],
'application/json',
'Incorrect request content-type',
)
self.assertEqual(request.headers['Authorization'], 'bearer my-token',
'Incorrect request authorization header')
self.assertEqual(
request.json(),
{
'duration': 130,
'start_time': '2021-09-14 10:00:00',
'extrainfo': 'message for staff',
'description': 'interim-2021-wgname-01',
},
'Incorrect request content'
)
# same time in different time zones
for start_time in [
utc.localize(datetime.datetime(2021, 9, 14, 10, 0, 0)),
timezone('america/halifax').localize(datetime.datetime(2021, 9, 14, 7, 0, 0)),
timezone('europe/kiev').localize(datetime.datetime(2021, 9, 14, 13, 0, 0)),
timezone('pacific/easter').localize(datetime.datetime(2021, 9, 14, 5, 0, 0)),
timezone('africa/porto-novo').localize(datetime.datetime(2021, 9, 14, 11, 0, 0)),
]:
self.assertEqual(
api_response,
{
'rooms': {
'3d55bce0-535e-4ba8-bb8e-734911cf3c32': {
'room': {
'id': 18,
'start_time': start_time,
'duration': datetime.timedelta(minutes=130),
'description': 'interim-2021-wgname-01',
},
'url': 'https://meetings.conf.meetecho.com/interim/?short=3d55bce0-535e-4ba8-bb8e-734911cf3c32',
'deletion_token': '<PASSWORD>',
},
}
},
f'Incorrect time conversion for {start_time.tzinfo.zone}',
)
def test_fetch_meetings(self):
self.maxDiff = 2048
self.requests_mock.get(
self.fetch_meetings_url,
status_code=200,
json={
'rooms': {
'3d55bce0-535e-4ba8-bb8e-734911cf3c32': {
'room': {
'id': 18,
'start_time': '2021-09-14 10:00:00',
'duration': 130,
'description': 'interim-2021-wgname-01',
},
'url': 'https://meetings.conf.meetecho.com/interim/?short=3d55bce0-535e-4ba8-bb8e-734911cf3c32',
'deletion_token': '<PASSWORD>',
},
'e68e96d4-d38f-475b-9073-ecab46ca96a5': {
'room': {
'id': 23,
'start_time': '2021-09-15 14:30:00',
'duration': 30,
'description': 'interim-2021-wgname-02',
},
'url': 'https://meetings.conf.meetecho.com/interim/?short=e68e96d4-d38f-475b-9073-ecab46ca96a5',
'deletion_token': '<PASSWORD>',
},
}
},
)
api = MeetechoAPI(API_BASE, CLIENT_ID, CLIENT_SECRET)
api_response = api.fetch_meetings(wg_token='<PASSWORD>')
self.assertTrue(self.requests_mock.called)
request = self.requests_mock.last_request
self.assertIn('Authorization', request.headers)
self.assertEqual(request.headers['Authorization'], 'bearer my-token',
'Incorrect request authorization header')
self.assertEqual(
api_response,
{
'rooms': {
'3d55bce0-535e-4ba8-bb8e-734911cf3c32': {
'room': {
'id': 18,
'start_time': utc.localize(datetime.datetime(2021, 9, 14, 10, 0, 0)),
'duration': datetime.timedelta(minutes=130),
'description': 'interim-2021-wgname-01',
},
'url': 'https://meetings.conf.meetecho.com/interim/?short=3d55bce0-535e-4ba8-bb8e-734911cf3c32',
'deletion_token': '<PASSWORD>',
},
'e68e96d4-d38f-475b-9073-ecab46ca96a5': {
'room': {
'id': 23,
'start_time': utc.localize(datetime.datetime(2021, 9, 15, 14, 30, 0)),
'duration': datetime.timedelta(minutes=30),
'description': 'interim-2021-wgname-02',
},
'url': 'https://meetings.conf.meetecho.com/interim/?short=e68e96d4-d38f-475b-9073-ecab46ca96a5',
'deletion_token': '<PASSWORD>',
},
}
},
)
def test_delete_meeting(self):
data_to_fetch = {}
self.requests_mock.post(self.delete_meetings_url, status_code=200, json=data_to_fetch)
api = MeetechoAPI(API_BASE, CLIENT_ID, CLIENT_SECRET)
api_response = api.delete_meeting(deletion_token='<PASSWORD>')
self.assertTrue(self.requests_mock.called)
request = self.requests_mock.last_request
self.assertIn('Authorization', request.headers)
self.assertEqual(request.headers['Authorization'], 'bearer delete-this-meeting-please',
'Incorrect request authorization header')
self.assertIsNone(request.body, 'Delete meeting request has no body')
self.assertCountEqual(api_response, data_to_fetch)
def test_request_helper_failed_requests(self):
self.requests_mock.register_uri(requests_mock.ANY, urljoin(API_BASE, 'unauthorized/url/endpoint'), status_code=401)
self.requests_mock.register_uri(requests_mock.ANY, urljoin(API_BASE, 'forbidden/url/endpoint'), status_code=403)
self.requests_mock.register_uri(requests_mock.ANY, urljoin(API_BASE, 'notfound/url/endpoint'), status_code=404)
api = MeetechoAPI(API_BASE, CLIENT_ID, CLIENT_SECRET)
for method in ['POST', 'GET']:
for code, endpoint in ((401, 'unauthorized/url/endpoint'), (403, 'forbidden/url/endpoint'), (404, 'notfound/url/endpoint')):
with self.assertRaises(Exception) as context:
api._request(method, endpoint)
self.assertIsInstance(context.exception, MeetechoAPIError)
self.assertIn(str(code), str(context.exception))
def test_request_helper_exception(self):
self.requests_mock.register_uri(requests_mock.ANY, urljoin(API_BASE, 'exception/url/endpoint'), exc=requests.exceptions.RequestException)
api = MeetechoAPI(API_BASE, CLIENT_ID, CLIENT_SECRET)
for method in ['POST', 'GET']:
with self.assertRaises(Exception) as context:
api._request(method, 'exception/url/endpoint')
self.assertIsInstance(context.exception, MeetechoAPIError)
def test_time_serialization(self):
"""Time de/serialization should be consistent"""
time = datetime.datetime.now(utc).replace(microsecond=0) # cut off to 0 microseconds
api = MeetechoAPI(API_BASE, CLIENT_ID, CLIENT_SECRET)
self.assertEqual(api._deserialize_time(api._serialize_time(time)), time)
@override_settings(MEETECHO_API_CONFIG=API_CONFIG)
class ConferenceManagerTests(TestCase):
def test_conference_from_api_dict(self):
confs = Conference.from_api_dict(
None,
{
'session-1-uuid': {
'room': {
'id': 1,
'start_time': utc.localize(datetime.datetime(2022,2,4,1,2,3)),
'duration': datetime.timedelta(minutes=45),
'description': 'some-description',
},
'url': 'https://example.com/some/url',
'deletion_token': '<PASSWORD>',
},
'session-2-uuid': {
'room': {
'id': 2,
'start_time': utc.localize(datetime.datetime(2022,2,5,4,5,6)),
'duration': datetime.timedelta(minutes=90),
'description': 'another-description',
},
'url': 'https://example.com/another/url',
'deletion_token': '<PASSWORD>',
},
}
)
self.assertCountEqual(
confs,
[
Conference(
manager=None,
id=1,
public_id='session-1-uuid',
description='some-description',
start_time=utc.localize(datetime.datetime(2022, 2, 4, 1, 2, 3)),
duration=datetime.timedelta(minutes=45),
url='https://example.com/some/url',
deletion_token='<PASSWORD>',
),
Conference(
manager=None,
id=2,
public_id='session-2-uuid',
description='another-description',
start_time=utc.localize(datetime.datetime(2022, 2, 5, 4, 5, 6)),
duration=datetime.timedelta(minutes=90),
url='https://example.com/another/url',
deletion_token='<PASSWORD>',
),
]
)
@patch.object(ConferenceManager, 'wg_token', return_value='atoken')
@patch('ietf.utils.meetecho.MeetechoAPI.fetch_meetings')
def test_fetch(self, mock_fetch, _):
mock_fetch.return_value = {
'rooms': {
'session-1-uuid': {
'room': {
'id': 1,
'start_time': utc.localize(datetime.datetime(2022,2,4,1,2,3)),
'duration': datetime.timedelta(minutes=45),
'description': 'some-description',
},
'url': 'https://example.com/some/url',
'deletion_token': '<PASSWORD>',
},
}
}
cm = ConferenceManager(settings.MEETECHO_API_CONFIG)
fetched = cm.fetch('acronym')
self.assertEqual(
fetched,
[Conference(
manager=cm,
id=1,
public_id='session-1-uuid',
description='some-description',
start_time=utc.localize(datetime.datetime(2022,2,4,1,2,3)),
duration=datetime.timedelta(minutes=45),
url='https://example.com/some/url',
deletion_token='<PASSWORD>',
)],
)
self.assertEqual(mock_fetch.call_args[0], ('atoken',))
@patch.object(ConferenceManager, 'wg_token', return_value='atoken')
@patch('ietf.utils.meetecho.MeetechoAPI.schedule_meeting')
def test_create(self, mock_schedule, _):
mock_schedule.return_value = {
'rooms': {
'session-1-uuid': {
'room': {
'id': 1,
'start_time': utc.localize(datetime.datetime(2022,2,4,1,2,3)),
'duration': datetime.timedelta(minutes=45),
'description': 'some-description',
},
'url': 'https://example.com/some/url',
'deletion_token': '<PASSWORD>',
},
},
}
cm = ConferenceManager(settings.MEETECHO_API_CONFIG)
result = cm.create('group', 'desc', 'starttime', 'dur', 'extra')
self.assertEqual(
result,
[Conference(
manager=cm,
id=1,
public_id='session-1-uuid',
description='some-description',
start_time=utc.localize(datetime.datetime(2022,2,4,1,2,3)),
duration=datetime.timedelta(minutes=45),
url='https://example.com/some/url',
deletion_token='<PASSWORD>',
)]
)
args, kwargs = mock_schedule.call_args
self.assertEqual(
kwargs,
{
'wg_token': '<PASSWORD>',
'description': 'desc',
'start_time': 'starttime',
'duration': 'dur',
'extrainfo': 'extra',
})
@patch('ietf.utils.meetecho.MeetechoAPI.delete_meeting')
def test_delete_conference(self, mock_delete):
cm = ConferenceManager(settings.MEETECHO_API_CONFIG)
cm.delete_conference(Conference(None, None, None, None, None, None, None, 'delete-this'))
args, kwargs = mock_delete.call_args
self.assertEqual(args, ('delete-this',))
@patch('ietf.utils.meetecho.MeetechoAPI.delete_meeting')
def test_delete_by_url(self, mock_delete):
cm = ConferenceManager(settings.MEETECHO_API_CONFIG)
cm.delete_conference(Conference(None, None, None, None, None, None, 'the-url', 'delete-this'))
args, kwargs = mock_delete.call_args
self.assertEqual(args, ('delete-this',))
``` |
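These tests pin down the API's wire format for times: naive `%Y-%m-%d %H:%M:%S` strings interpreted as UTC and converted to aware datetimes on the way in. A standalone sketch of that round trip (not using the MeetechoAPI helpers):
```python
import datetime
from pytz import utc

# Convention exercised by the tests above: the wire format is a naive UTC string.
wire = '2021-09-14 10:00:00'
parsed = utc.localize(datetime.datetime.strptime(wire, '%Y-%m-%d %H:%M:%S'))
assert parsed == utc.localize(datetime.datetime(2021, 9, 14, 10, 0, 0))
assert parsed.strftime('%Y-%m-%d %H:%M:%S') == wire
```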
{
"source": "jimfhahn/Annif",
"score": 3
} |
#### File: annif/analyzer/simple.py
```python
from . import analyzer
class SimpleAnalyzer(analyzer.Analyzer):
name = "simple"
def __init__(self, param, **kwargs):
self.param = param
super().__init__(**kwargs)
def _normalize_word(self, word):
return word.lower()
```
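A minimal usage sketch of the analyzer above; constructing it directly with `param=None` is an assumption for illustration (Annif normally instantiates analyzers through its analyzer registry):
```python
# Minimal sketch: SimpleAnalyzer only lowercases tokens, nothing else.
analyzer = SimpleAnalyzer(param=None)
assert analyzer._normalize_word("Annif") == "annif"
```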
#### File: annif/backend/http.py
```python
import dateutil.parser
import requests
import requests.exceptions
from annif.suggestion import SubjectSuggestion, ListSuggestionResult
from annif.exception import OperationFailedException
from . import backend
class HTTPBackend(backend.AnnifBackend):
name = "http"
@property
def is_trained(self):
return self._get_project_info('is_trained')
@property
def modification_time(self):
mtime = self._get_project_info('modification_time')
if mtime is None:
return None
return dateutil.parser.parse(mtime)
def _get_project_info(self, key):
params = self._get_backend_params(None)
try:
req = requests.get(params['endpoint'].replace('/suggest', ''))
req.raise_for_status()
except requests.exceptions.RequestException as err:
msg = f"HTTP request failed: {err}"
raise OperationFailedException(msg) from err
try:
response = req.json()
except ValueError as err:
msg = f"JSON decode failed: {err}"
raise OperationFailedException(msg) from err
if key in response:
return response[key]
else:
return None
def _suggest(self, text, params):
data = {'text': text}
if 'project' in params:
data['project'] = params['project']
try:
req = requests.post(params['endpoint'], data=data)
req.raise_for_status()
except requests.exceptions.RequestException as err:
self.warning("HTTP request failed: {}".format(err))
return ListSuggestionResult([])
try:
response = req.json()
except ValueError as err:
self.warning("JSON decode failed: {}".format(err))
return ListSuggestionResult([])
if 'results' in response:
results = response['results']
else:
results = response
try:
subject_suggestions = [SubjectSuggestion(
uri=hit['uri'],
label=None,
notation=None,
score=hit['score'])
for hit in results if hit['score'] > 0.0]
except (TypeError, ValueError) as err:
self.warning("Problem interpreting JSON data: {}".format(err))
return ListSuggestionResult([])
return ListSuggestionResult.create_from_index(subject_suggestions,
self.project.subjects)
```
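From the parsing in `_suggest` above, the remote endpoint may return either a bare list of hits or an object with a `results` key; each hit needs `uri` and `score`, labels are ignored, and zero scores are dropped. An illustrative response body (made-up URIs and scores):
```python
# Illustrative JSON body that _suggest() above would accept.
example_response = {
    "results": [
        {"uri": "http://example.org/subj/1", "label": "ignored", "score": 0.83},
        {"uri": "http://example.org/subj/2", "label": "ignored", "score": 0.0},  # score <= 0.0: dropped
    ]
}
```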
#### File: annif/backend/__init__.py
```python
def _dummy():
from . import dummy
return dummy.DummyBackend
def _ensemble():
from . import ensemble
return ensemble.EnsembleBackend
def _fasttext():
try:
from . import fasttext
return fasttext.FastTextBackend
except ImportError:
raise ValueError("fastText not available, cannot use fasttext backend")
def _http():
from . import http
return http.HTTPBackend
def _mllm():
from . import mllm
return mllm.MLLMBackend
def _nn_ensemble():
try:
from . import nn_ensemble
return nn_ensemble.NNEnsembleBackend
except ImportError:
raise ValueError("Keras and TensorFlow not available, cannot use " +
"nn_ensemble backend")
def _omikuji():
try:
from . import omikuji
return omikuji.OmikujiBackend
except ImportError:
raise ValueError("Omikuji not available, cannot use omikuji backend")
def _pav():
from . import pav
return pav.PAVBackend
def _stwfsa():
from . import stwfsa
return stwfsa.StwfsaBackend
def _svc():
from . import svc
return svc.SVCBackend
def _tfidf():
from . import tfidf
return tfidf.TFIDFBackend
def _yake():
try:
from . import yake
return yake.YakeBackend
except ImportError:
raise ValueError("YAKE not available, cannot use yake backend")
# registry of the above functions
_backend_fns = {
'dummy': _dummy,
'ensemble': _ensemble,
'fasttext': _fasttext,
'http': _http,
'mllm': _mllm,
'nn_ensemble': _nn_ensemble,
'omikuji': _omikuji,
'pav': _pav,
'stwfsa': _stwfsa,
'svc': _svc,
'tfidf': _tfidf,
'yake': _yake
}
def get_backend(backend_id):
if backend_id in _backend_fns:
return _backend_fns[backend_id]()
else:
raise ValueError("No such backend type {}".format(backend_id))
```
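The registry above defers imports so that optional dependencies are only loaded when the matching backend id is requested. A short usage sketch:
```python
# Look up a backend class lazily; unknown ids raise ValueError, and backends
# whose optional dependencies are missing raise ValueError from their loader.
backend_cls = get_backend('tfidf')

try:
    get_backend('no-such-backend')
except ValueError as err:
    print(err)  # "No such backend type no-such-backend"
```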
#### File: annif/backend/nn_ensemble.py
```python
from io import BytesIO
import shutil
import os.path
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix
import joblib
import lmdb
from tensorflow.keras.layers import Input, Dense, Add, Flatten, Dropout, Layer
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import Sequence
import tensorflow.keras.backend as K
import annif.corpus
import annif.parallel
import annif.util
from annif.exception import NotInitializedException, NotSupportedException
from annif.suggestion import VectorSuggestionResult
from . import backend
from . import ensemble
def idx_to_key(idx):
"""convert an integer index to a binary key for use in LMDB"""
return b'%08d' % idx
def key_to_idx(key):
"""convert a binary LMDB key to an integer index"""
return int(key)
class LMDBSequence(Sequence):
"""A sequence of samples stored in a LMDB database."""
def __init__(self, txn, batch_size):
self._txn = txn
cursor = txn.cursor()
if cursor.last():
# Counter holds the number of samples in the database
self._counter = key_to_idx(cursor.key()) + 1
else: # empty database
self._counter = 0
self._batch_size = batch_size
def add_sample(self, inputs, targets):
# use zero-padded 8-digit key
key = idx_to_key(self._counter)
self._counter += 1
# convert the sample into a sparse matrix and serialize it as bytes
sample = (csc_matrix(inputs), csr_matrix(targets))
buf = BytesIO()
joblib.dump(sample, buf)
buf.seek(0)
self._txn.put(key, buf.read())
def __getitem__(self, idx):
"""get a particular batch of samples"""
cursor = self._txn.cursor()
first_key = idx * self._batch_size
cursor.set_key(idx_to_key(first_key))
input_arrays = []
target_arrays = []
for key, value in cursor.iternext():
if key_to_idx(key) >= (first_key + self._batch_size):
break
input_csr, target_csr = joblib.load(BytesIO(value))
input_arrays.append(input_csr.toarray())
target_arrays.append(target_csr.toarray().flatten())
return np.array(input_arrays), np.array(target_arrays)
def __len__(self):
"""return the number of available batches"""
return int(np.ceil(self._counter / self._batch_size))
class MeanLayer(Layer):
"""Custom Keras layer that calculates mean values along the 2nd axis."""
def call(self, inputs):
return K.mean(inputs, axis=2)
class NNEnsembleBackend(
backend.AnnifLearningBackend,
ensemble.BaseEnsembleBackend):
"""Neural network ensemble backend that combines results from multiple
projects"""
name = "nn_ensemble"
MODEL_FILE = "nn-model.h5"
LMDB_FILE = 'nn-train.mdb'
DEFAULT_PARAMETERS = {
'nodes': 100,
'dropout_rate': 0.2,
'optimizer': 'adam',
'epochs': 10,
'learn-epochs': 1,
'lmdb_map_size': 1024 * 1024 * 1024
}
# defaults for uninitialized instances
_model = None
def default_params(self):
params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy()
params.update(self.DEFAULT_PARAMETERS)
return params
def initialize(self, parallel=False):
super().initialize(parallel)
if self._model is not None:
return # already initialized
if parallel:
# Don't load TF model just before parallel execution,
# since it won't work after forking worker processes
return
model_filename = os.path.join(self.datadir, self.MODEL_FILE)
if not os.path.exists(model_filename):
raise NotInitializedException(
'model file {} not found'.format(model_filename),
backend_id=self.backend_id)
self.debug('loading Keras model from {}'.format(model_filename))
self._model = load_model(model_filename,
custom_objects={'MeanLayer': MeanLayer})
def _merge_hits_from_sources(self, hits_from_sources, params):
score_vector = np.array([np.sqrt(hits.as_vector(subjects))
* weight * len(hits_from_sources)
for hits, weight, subjects
in hits_from_sources],
dtype=np.float32)
results = self._model.predict(
np.expand_dims(score_vector.transpose(), 0))
return VectorSuggestionResult(results[0])
def _create_model(self, sources):
self.info("creating NN ensemble model")
inputs = Input(shape=(len(self.project.subjects), len(sources)))
flat_input = Flatten()(inputs)
drop_input = Dropout(
rate=float(
self.params['dropout_rate']))(flat_input)
hidden = Dense(int(self.params['nodes']),
activation="relu")(drop_input)
drop_hidden = Dropout(rate=float(self.params['dropout_rate']))(hidden)
delta = Dense(len(self.project.subjects),
kernel_initializer='zeros',
bias_initializer='zeros')(drop_hidden)
mean = MeanLayer()(inputs)
predictions = Add()([mean, delta])
self._model = Model(inputs=inputs, outputs=predictions)
self._model.compile(optimizer=self.params['optimizer'],
loss='binary_crossentropy',
metrics=['top_k_categorical_accuracy'])
if 'lr' in self.params:
self._model.optimizer.learning_rate.assign(
float(self.params['lr']))
summary = []
self._model.summary(print_fn=summary.append)
self.debug("Created model: \n" + "\n".join(summary))
def _train(self, corpus, params, jobs=0):
sources = annif.util.parse_sources(self.params['sources'])
self._create_model(sources)
self._fit_model(
corpus,
epochs=int(params['epochs']),
lmdb_map_size=int(params['lmdb_map_size']),
n_jobs=jobs)
def _corpus_to_vectors(self, corpus, seq, n_jobs):
# pass corpus through all source projects
sources = dict(
annif.util.parse_sources(self.params['sources']))
# initialize the source projects before forking, to save memory
self.info(
f"Initializing source projects: {', '.join(sources.keys())}")
for project_id in sources.keys():
project = self.project.registry.get_project(project_id)
project.initialize(parallel=True)
psmap = annif.parallel.ProjectSuggestMap(
self.project.registry,
list(sources.keys()),
backend_params=None,
limit=None,
threshold=0.0)
jobs, pool_class = annif.parallel.get_pool(n_jobs)
self.info("Processing training documents...")
with pool_class(jobs) as pool:
for hits, uris, labels in pool.imap_unordered(
psmap.suggest, corpus.documents):
doc_scores = []
for project_id, p_hits in hits.items():
vector = p_hits.as_vector(self.project.subjects)
doc_scores.append(np.sqrt(vector)
* sources[project_id]
* len(sources))
score_vector = np.array(doc_scores,
dtype=np.float32).transpose()
subjects = annif.corpus.SubjectSet((uris, labels))
true_vector = subjects.as_vector(self.project.subjects)
seq.add_sample(score_vector, true_vector)
def _open_lmdb(self, cached, lmdb_map_size):
lmdb_path = os.path.join(self.datadir, self.LMDB_FILE)
if not cached and os.path.exists(lmdb_path):
shutil.rmtree(lmdb_path)
return lmdb.open(lmdb_path, map_size=lmdb_map_size, writemap=True)
def _fit_model(self, corpus, epochs, lmdb_map_size, n_jobs=1):
env = self._open_lmdb(corpus == 'cached', lmdb_map_size)
if corpus != 'cached':
if corpus.is_empty():
raise NotSupportedException(
'Cannot train nn_ensemble project with no documents')
with env.begin(write=True, buffers=True) as txn:
seq = LMDBSequence(txn, batch_size=32)
self._corpus_to_vectors(corpus, seq, n_jobs)
else:
self.info("Reusing cached training data from previous run.")
# fit the model using a read-only view of the LMDB
self.info("Training neural network model...")
with env.begin(buffers=True) as txn:
seq = LMDBSequence(txn, batch_size=32)
self._model.fit(seq, verbose=True, epochs=epochs)
annif.util.atomic_save(
self._model,
self.datadir,
self.MODEL_FILE)
def _learn(self, corpus, params):
self.initialize()
self._fit_model(
corpus,
int(params['learn-epochs']),
int(params['lmdb_map_size']))
```
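A small sketch of the LMDB key scheme used above: samples live under zero-padded 8-digit keys, so batch `idx` of an `LMDBSequence` with batch size 32 starts at key `idx * 32` and reads forward in key order:
```python
# Key layout behind LMDBSequence: zero-padded keys keep lexicographic
# order equal to insertion order.
assert idx_to_key(0) == b'00000000'
assert idx_to_key(42) == b'00000042'
assert key_to_idx(idx_to_key(42)) == 42
# With batch_size=32, batch 3 covers sample indices 96..127,
# i.e. keys b'00000096' through b'00000127'.
```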
#### File: Annif/annif/config.py
```python
import os.path
import configparser
import tomli
import annif
import annif.util
from glob import glob
from annif.exception import ConfigurationException
logger = annif.logger
class AnnifConfigCFG:
"""Class for reading configuration in CFG/INI format"""
def __init__(self, filename):
self._config = configparser.ConfigParser()
self._config.optionxform = annif.util.identity
with open(filename, encoding='utf-8-sig') as projf:
try:
logger.debug(
f"Reading configuration file {filename} in CFG format")
self._config.read_file(projf)
except (configparser.DuplicateOptionError,
configparser.DuplicateSectionError) as err:
raise ConfigurationException(err)
@property
def project_ids(self):
return self._config.sections()
def __getitem__(self, key):
return self._config[key]
class AnnifConfigTOML:
"""Class for reading configuration in TOML format"""
def __init__(self, filename):
with open(filename, "rb") as projf:
try:
logger.debug(
f"Reading configuration file {filename} in TOML format")
self._config = tomli.load(projf)
except tomli.TOMLDecodeError as err:
raise ConfigurationException(
f"Parsing TOML file '{filename}' failed: {err}")
@property
def project_ids(self):
return self._config.keys()
def __getitem__(self, key):
return self._config[key]
class AnnifConfigDirectory:
"""Class for reading configuration from directory"""
def __init__(self, directory):
files = glob(os.path.join(directory, '*.cfg'))
files.extend(glob(os.path.join(directory, '*.toml')))
logger.debug(f"Reading configuration files in directory {directory}")
self._config = dict()
for file in files:
source_config = parse_config(file)
for proj_id in source_config.project_ids:
self._check_duplicate_project_ids(proj_id, file)
self._config[proj_id] = source_config[proj_id]
def _check_duplicate_project_ids(self, proj_id, file):
if proj_id in self._config:
# Error message resembles configparser's DuplicateSection message
raise ConfigurationException(
f'While reading from "{file}": project ID "{proj_id}" already '
'exists in another configuration file in the directory.')
@property
def project_ids(self):
return self._config.keys()
def __getitem__(self, key):
return self._config[key]
def check_config(projects_config_path):
if os.path.exists(projects_config_path):
return projects_config_path
else:
logger.warning(
'Project configuration file or directory ' +
f'"{projects_config_path}" is missing. Please provide one. ' +
'You can set the path to the project configuration ' +
'using the ANNIF_PROJECTS environment ' +
'variable or the command-line option "--projects".')
return None
def find_config():
for path in ('projects.cfg', 'projects.toml', 'projects.d'):
if os.path.exists(path):
return path
logger.warning(
'Could not find project configuration ' +
'"projects.cfg", "projects.toml" or "projects.d". ' +
'You can set the path to the project configuration ' +
'using the ANNIF_PROJECTS environment ' +
'variable or the command-line option "--projects".')
return None
def parse_config(projects_config_path):
if projects_config_path:
projects_config_path = check_config(projects_config_path)
else:
projects_config_path = find_config()
if not projects_config_path: # not found
return None
if os.path.isdir(projects_config_path):
return AnnifConfigDirectory(projects_config_path)
elif projects_config_path.endswith('.toml'): # TOML format
return AnnifConfigTOML(projects_config_path)
else: # classic CFG/INI style format
return AnnifConfigCFG(projects_config_path)
```
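`parse_config` dispatches on the path: a directory is merged project by project, a `.toml` path is parsed as TOML, and anything else as CFG/INI. An illustrative comparison of the two syntaxes; the option names shown (`name`, `backend`, `language`) are assumptions for the example, not taken from this file:
```python
# Illustrative only: the same project section in the two supported syntaxes.
cfg_text = """
[my-project]
name=My project
backend=tfidf
language=en
"""

toml_text = """
[my-project]
name="My project"
backend="tfidf"
language="en"
"""
# parse_config('projects.cfg') -> AnnifConfigCFG, parse_config('projects.toml')
# -> AnnifConfigTOML; both expose the same project_ids / __getitem__ interface.
```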
#### File: Annif/annif/datadir.py
```python
import os
import os.path
class DatadirMixin:
"""Mixin class for types that need a data directory for storing files"""
def __init__(self, datadir, typename, identifier):
self._datadir_path = os.path.join(datadir, typename, identifier)
@property
def datadir(self):
if not os.path.exists(self._datadir_path):
try:
os.makedirs(self._datadir_path)
except FileExistsError:
# apparently the datadir was created by another thread!
pass
return self._datadir_path
```
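The mixin above lazily creates `<datadir>/<typename>/<identifier>` on first access of `datadir`. A minimal sketch:
```python
import os.path
import tempfile

class _Example(DatadirMixin):
    def __init__(self, datadir):
        super().__init__(datadir, typename='projects', identifier='my-project')

base = tempfile.mkdtemp()
obj = _Example(base)
path = obj.datadir  # directory is created on first access
assert path == os.path.join(base, 'projects', 'my-project')
assert os.path.isdir(path)
```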
#### File: Annif/annif/exception.py
```python
from click import ClickException
class AnnifException(ClickException):
"""Base Annif exception. We define this as a subclass of ClickException so
that the CLI can automatically handle exceptions. This exception cannot be
instantiated directly - subclasses should be used instead."""
def __init__(self, message, project_id=None, backend_id=None):
super().__init__(message)
self.project_id = project_id
self.backend_id = backend_id
if self.prefix is None:
raise TypeError("Cannot instantiate exception without a prefix.")
# subclasses should set this to a descriptive prefix
prefix = None
def format_message(self):
if self.project_id is not None:
return "{} project '{}': {}".format(self.prefix,
self.project_id,
self.message)
if self.backend_id is not None:
return "{} backend '{}': {}".format(self.prefix,
self.backend_id,
self.message)
return "{}: {}".format(self.prefix, self.message)
class NotInitializedException(AnnifException):
"""Exception raised for attempting to use a project or backend that
cannot be initialized, most likely since it is not yet functional
because of lack of vocabulary or training."""
prefix = "Couldn't initialize"
class ConfigurationException(AnnifException):
"""Exception raised when a project or backend is misconfigured."""
prefix = "Misconfigured"
class NotSupportedException(AnnifException):
"""Exception raised when an operation is not supported by a project or
backend."""
prefix = "Not supported"
class OperationFailedException(AnnifException):
"""Exception raised when an operation fails for some unknown reason."""
prefix = "Operation failed"
```
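The prefix mechanism is easiest to see with a toy subclass; `CacheMissException` below is hypothetical and exists only to show how `format_message()` combines the prefix, the project id and the message.
```python
# Hypothetical subclass, for illustration only.
class CacheMissException(AnnifException):
    """Raised when a cached artefact is unexpectedly missing."""
    prefix = "Cache miss in"

err = CacheMissException("model file not found", project_id="dummy-fi")
print(err.format_message())
# -> Cache miss in project 'dummy-fi': model file not found
```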
#### File: Annif/annif/parallel.py
```python
import multiprocessing
import multiprocessing.dummy
class BaseWorker:
"""Base class for workers that implement tasks executed via
multiprocessing. The init method can be used to store data objects that
are necessary for the operation. They will be stored in a class
attribute that is accessible to the static worker method. The storage
solution is inspired by this blog post:
https://thelaziestprogrammer.com/python/multiprocessing-pool-a-global-solution # noqa
"""
args = None
@classmethod
def init(cls, args):
cls.args = args # pragma: no cover
class ProjectSuggestMap:
"""A utility class that can be used to wrap one or more projects and
provide a mapping method that converts Document objects to suggestions.
Intended to be used with the multiprocessing module."""
def __init__(
self,
registry,
project_ids,
backend_params,
limit,
threshold):
self.registry = registry
self.project_ids = project_ids
self.backend_params = backend_params
self.limit = limit
self.threshold = threshold
def suggest(self, doc):
filtered_hits = {}
for project_id in self.project_ids:
project = self.registry.get_project(project_id)
hits = project.suggest(doc.text, self.backend_params)
filtered_hits[project_id] = hits.filter(
project.subjects, self.limit, self.threshold)
return (filtered_hits, doc.uris, doc.labels)
def get_pool(n_jobs):
"""return a suitable multiprocessing pool class, and the correct jobs
argument for its constructor, for the given amount of parallel jobs"""
if n_jobs < 1:
n_jobs = None
pool_class = multiprocessing.Pool
elif n_jobs == 1:
# use the dummy wrapper around threading to avoid subprocess overhead
pool_class = multiprocessing.dummy.Pool
else:
pool_class = multiprocessing.Pool
return n_jobs, pool_class
```
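A minimal sketch of the intended call pattern for `get_pool()`; the `square` worker and the job count are illustrative, and `n_jobs=1` is chosen so the thread-backed dummy pool is used.
```python
# Illustrative only -- assumes get_pool from above is in scope.
def square(x):
    return x * x

if __name__ == '__main__':
    n_jobs, pool_class = get_pool(1)   # 1 -> multiprocessing.dummy.Pool (threads)
    with pool_class(n_jobs) as pool:
        print(sorted(pool.imap_unordered(square, range(5))))
```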
#### File: Annif/annif/project.py
```python
import enum
import os.path
from shutil import rmtree
import annif
import annif.transform
import annif.analyzer
import annif.corpus
import annif.suggestion
import annif.backend
import annif.vocab
from annif.datadir import DatadirMixin
from annif.exception import AnnifException, ConfigurationException, \
NotSupportedException, NotInitializedException
logger = annif.logger
class Access(enum.IntEnum):
"""Enumeration of access levels for projects"""
private = 1
hidden = 2
public = 3
class AnnifProject(DatadirMixin):
"""Class representing the configuration of a single Annif project."""
# defaults for uninitialized instances
_transform = None
_analyzer = None
_backend = None
_vocab = None
initialized = False
# default values for configuration settings
DEFAULT_ACCESS = 'public'
def __init__(self, project_id, config, datadir, registry):
DatadirMixin.__init__(self, datadir, 'projects', project_id)
self.project_id = project_id
self.name = config.get('name', project_id)
self.language = config['language']
self.analyzer_spec = config.get('analyzer', None)
self.transform_spec = config.get('transform', 'pass')
self.vocab_id = config.get('vocab', None)
self.config = config
self._base_datadir = datadir
self.registry = registry
self._init_access()
def _init_access(self):
access = self.config.get('access', self.DEFAULT_ACCESS)
try:
self.access = getattr(Access, access)
except AttributeError:
raise ConfigurationException(
"'{}' is not a valid access setting".format(access),
project_id=self.project_id)
def _initialize_analyzer(self):
if not self.analyzer_spec:
return # not configured, so assume it's not needed
analyzer = self.analyzer
logger.debug("Project '%s': initialized analyzer: %s",
self.project_id,
str(analyzer))
def _initialize_subjects(self):
try:
subjects = self.subjects
logger.debug("Project '%s': initialized subjects: %s",
self.project_id,
str(subjects))
except AnnifException as err:
logger.warning(err.format_message())
def _initialize_backend(self, parallel):
logger.debug("Project '%s': initializing backend", self.project_id)
try:
if not self.backend:
logger.debug("Cannot initialize backend: does not exist")
return
self.backend.initialize(parallel)
except AnnifException as err:
logger.warning(err.format_message())
def initialize(self, parallel=False):
"""Initialize this project and its backend so that they are ready to
be used. If parallel is True, expect that the project will be used
for parallel processing."""
if self.initialized:
return
logger.debug("Initializing project '%s'", self.project_id)
self._initialize_analyzer()
self._initialize_subjects()
self._initialize_backend(parallel)
self.initialized = True
def _suggest_with_backend(self, text, backend_params):
if backend_params is None:
backend_params = {}
beparams = backend_params.get(self.backend.backend_id, {})
hits = self.backend.suggest(text, beparams)
logger.debug(
'Got %d hits from backend %s',
len(hits), self.backend.backend_id)
return hits
@property
def analyzer(self):
if self._analyzer is None:
if self.analyzer_spec:
self._analyzer = annif.analyzer.get_analyzer(
self.analyzer_spec)
else:
raise ConfigurationException(
"analyzer setting is missing", project_id=self.project_id)
return self._analyzer
@property
def transform(self):
if self._transform is None:
self._transform = annif.transform.get_transform(
self.transform_spec, project=self)
return self._transform
@property
def backend(self):
if self._backend is None:
if 'backend' not in self.config:
raise ConfigurationException(
"backend setting is missing", project_id=self.project_id)
backend_id = self.config['backend']
try:
backend_class = annif.backend.get_backend(backend_id)
self._backend = backend_class(
backend_id, config_params=self.config,
project=self)
except ValueError:
logger.warning(
"Could not create backend %s, "
"make sure you've installed optional dependencies",
backend_id)
return self._backend
@property
def vocab(self):
if self._vocab is None:
if self.vocab_id is None:
raise ConfigurationException("vocab setting is missing",
project_id=self.project_id)
self._vocab = annif.vocab.AnnifVocabulary(self.vocab_id,
self._base_datadir,
self.language)
return self._vocab
@property
def subjects(self):
return self.vocab.subjects
def _get_info(self, key):
try:
be = self.backend
if be is not None:
return getattr(be, key)
except AnnifException as err:
logger.warning(err.format_message())
return None
@property
def is_trained(self):
return self._get_info('is_trained')
@property
def modification_time(self):
return self._get_info('modification_time')
def suggest(self, text, backend_params=None):
"""Suggest subjects the given text by passing it to the backend. Returns a
list of SubjectSuggestion objects ordered by decreasing score."""
if not self.is_trained:
if self.is_trained is None:
logger.warning('Could not get train state information.')
else:
raise NotInitializedException('Project is not trained.')
logger.debug('Suggesting subjects for text "%s..." (len=%d)',
text[:20], len(text))
text = self.transform.transform_text(text)
hits = self._suggest_with_backend(text, backend_params)
logger.debug('%d hits from backend', len(hits))
return hits
def train(self, corpus, backend_params=None, jobs=0):
"""train the project using documents from a metadata source"""
if corpus != 'cached':
corpus.set_subject_index(self.subjects)
corpus = self.transform.transform_corpus(corpus)
if backend_params is None:
backend_params = {}
beparams = backend_params.get(self.backend.backend_id, {})
self.backend.train(corpus, beparams, jobs)
def learn(self, corpus, backend_params=None):
"""further train the project using documents from a metadata source"""
corpus.set_subject_index(self.subjects)
if backend_params is None:
backend_params = {}
beparams = backend_params.get(self.backend.backend_id, {})
corpus = self.transform.transform_corpus(corpus)
if isinstance(
self.backend,
annif.backend.backend.AnnifLearningBackend):
self.backend.learn(corpus, beparams)
else:
raise NotSupportedException("Learning not supported by backend",
project_id=self.project_id)
def hyperopt(self, corpus, trials, jobs, metric, results_file):
"""optimize the hyperparameters of the project using a validation
corpus against a given metric"""
if isinstance(
self.backend,
annif.backend.hyperopt.AnnifHyperoptBackend):
optimizer = self.backend.get_hp_optimizer(corpus, metric)
return optimizer.optimize(trials, jobs, results_file)
raise NotSupportedException(
"Hyperparameter optimization not supported "
"by backend", project_id=self.project_id)
def dump(self):
"""return this project as a dict"""
return {'project_id': self.project_id,
'name': self.name,
'language': self.language,
'backend': {'backend_id': self.config.get('backend')},
'is_trained': self.is_trained,
'modification_time': self.modification_time
}
def remove_model_data(self):
"""remove the data of this project"""
datadir_path = self._datadir_path
if os.path.isdir(datadir_path):
rmtree(datadir_path)
logger.info('Removed model data for project {}.'
.format(self.project_id))
else:
logger.warning('No model data to remove for project {}.'
.format(self.project_id))
```
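A hedged sketch of constructing a project directly: the keys below mirror what `__init__` and `_init_access` read from the configuration, while the project id, backend name, data directory and access level are placeholders (a real setup would also supply a registry and a parsed configuration source).
```python
# Illustrative only -- a plain dict stands in for a parsed configuration section.
config = {
    'name': 'Dummy Finnish',
    'language': 'fi',
    'backend': 'dummy',
    'access': 'hidden',
}
project = AnnifProject('dummy-fi', config, datadir='data', registry=None)
print(project.name, project.access)  # -> Dummy Finnish Access.hidden
```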
#### File: Annif/tests/test_backend_fasttext.py
```python
import logging
import pytest
import annif.backend
import annif.corpus
from annif.exception import NotSupportedException
fasttext = pytest.importorskip("annif.backend.fasttext")
def test_fasttext_default_params(project):
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={},
project=project)
expected_default_params = {
'limit': 100,
'chunksize': 1,
'dim': 100,
'lr': 0.25,
'epoch': 5,
'loss': 'hs',
}
actual_params = fasttext.params
for param, val in expected_default_params.items():
assert param in actual_params and actual_params[param] == val
def test_fasttext_train(document_corpus, project, datadir):
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 50,
'dim': 100,
'lr': 0.25,
'epoch': 20,
'loss': 'hs'},
project=project)
fasttext.train(document_corpus)
assert fasttext._model is not None
assert datadir.join('fasttext-model').exists()
assert datadir.join('fasttext-model').size() > 0
def test_fasttext_train_cached_jobs(project, datadir):
assert datadir.join('fasttext-train.txt').exists()
datadir.join('fasttext-model').remove()
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 50,
'dim': 100,
'lr': 0.25,
'epoch': 20,
'loss': 'hs'},
project=project)
fasttext.train("cached", jobs=2)
assert fasttext._model is not None
assert datadir.join('fasttext-model').exists()
assert datadir.join('fasttext-model').size() > 0
def test_fasttext_train_unknown_subject(tmpdir, datadir, project):
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 50,
'dim': 100,
'lr': 0.25,
'epoch': 20,
'loss': 'hs'},
project=project)
tmpfile = tmpdir.join('document.tsv')
tmpfile.write("nonexistent\thttp://example.com/nonexistent\n" +
"arkeologia\thttp://www.yso.fi/onto/yso/p1265")
document_corpus = annif.corpus.DocumentFile(str(tmpfile))
fasttext.train(document_corpus)
assert fasttext._model is not None
assert datadir.join('fasttext-model').exists()
assert datadir.join('fasttext-model').size() > 0
def test_fasttext_train_nodocuments(project, empty_corpus):
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 50,
'dim': 100,
'lr': 0.25,
'epoch': 20,
'loss': 'hs'},
project=project)
with pytest.raises(NotSupportedException) as excinfo:
fasttext.train(empty_corpus)
assert 'training backend fasttext with no documents' in str(excinfo.value)
def test_train_fasttext_params(document_corpus, project, caplog):
logger = annif.logger
logger.propagate = True
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 51,
'dim': 101,
'lr': 0.21,
'epoch': 21,
'loss': 'hs'},
project=project)
params = {'dim': 1, 'lr': 42.1, 'epoch': 0}
with caplog.at_level(logging.DEBUG):
fasttext.train(document_corpus, params)
parameters_heading = 'Backend fasttext: Model parameters:'
assert parameters_heading in caplog.text
for line in caplog.text.splitlines():
if parameters_heading in line:
assert "'dim': 1" in line
assert "'lr': 42.1" in line
assert "'epoch': 0" in line
def test_fasttext_train_pretrained(datadir, document_corpus, project,
pretrained_vectors):
assert pretrained_vectors.exists()
assert pretrained_vectors.size() > 0
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 50,
'dim': 100,
'lr': 0.25,
'epoch': 20,
'loss': 'hs',
'pretrainedVectors': str(pretrained_vectors)},
project=project)
fasttext.train(document_corpus)
assert fasttext._model is not None
assert datadir.join('fasttext-model').exists()
assert datadir.join('fasttext-model').size() > 0
def test_fasttext_train_pretrained_wrong_dim(datadir, document_corpus, project,
pretrained_vectors):
assert pretrained_vectors.exists()
assert pretrained_vectors.size() > 0
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 50,
'dim': 50,
'lr': 0.25,
'epoch': 20,
'loss': 'hs',
'pretrainedVectors': str(pretrained_vectors)},
project=project)
with pytest.raises(ValueError):
fasttext.train(document_corpus)
assert fasttext._model is None
def test_fasttext_suggest(project):
fasttext_type = annif.backend.get_backend("fasttext")
fasttext = fasttext_type(
backend_id='fasttext',
config_params={
'limit': 50,
'chunksize': 1,
'dim': 100,
'lr': 0.25,
'epoch': 20,
'loss': 'hs'},
project=project)
results = fasttext.suggest("""Arkeologiaa sanotaan joskus myös
muinaistutkimukseksi tai muinaistieteeksi. Se on humanistinen tiede
tai oikeammin joukko tieteitä, jotka tutkivat ihmisen menneisyyttä.
Tutkimusta tehdään analysoimalla muinaisjäännöksiä eli niitä jälkiä,
joita ihmisten toiminta on jättänyt maaperään tai vesistöjen
pohjaan.""")
assert len(results) > 0
hits = results.as_list(project.subjects)
assert 'http://www.yso.fi/onto/yso/p1265' in [
result.uri for result in hits]
assert 'arkeologia' in [result.label for result in hits]
```
#### File: Annif/tests/test_rest.py
```python
import annif.rest
def test_rest_list_projects(app):
with app.app_context():
result = annif.rest.list_projects()
project_ids = [proj['project_id'] for proj in result['projects']]
# public project should be returned
assert 'dummy-fi' in project_ids
# hidden project should not be returned
assert 'dummy-en' not in project_ids
# private project should not be returned
assert 'dummydummy' not in project_ids
# project with no access level setting should be returned
assert 'ensemble' in project_ids
def test_rest_show_project_public(app):
# public projects should be accessible via REST
with app.app_context():
result = annif.rest.show_project('dummy-fi')
assert result['project_id'] == 'dummy-fi'
def test_rest_show_project_hidden(app):
# hidden projects should be accessible if you know the project id
with app.app_context():
result = annif.rest.show_project('dummy-en')
assert result['project_id'] == 'dummy-en'
def test_rest_show_project_private(app):
# private projects should not be accessible via REST
with app.app_context():
result = annif.rest.show_project('dummydummy')
assert result.status_code == 404
def test_rest_show_project_nonexistent(app):
with app.app_context():
result = annif.rest.show_project('nonexistent')
assert result.status_code == 404
def test_rest_suggest_public(app):
# public projects should be accessible via REST
with app.app_context():
result = annif.rest.suggest(
'dummy-fi',
text='example text',
limit=10,
threshold=0.0)
assert 'results' in result
def test_rest_suggest_hidden(app):
# hidden projects should be accessible if you know the project id
with app.app_context():
result = annif.rest.suggest(
'dummy-en',
text='example text',
limit=10,
threshold=0.0)
assert 'results' in result
def test_rest_suggest_private(app):
# private projects should not be accessible via REST
with app.app_context():
result = annif.rest.suggest(
'dummydummy',
text='example text',
limit=10,
threshold=0.0)
assert result.status_code == 404
def test_rest_suggest_nonexistent(app):
with app.app_context():
result = annif.rest.suggest(
'nonexistent',
text='example text',
limit=10,
threshold=0.0)
assert result.status_code == 404
def test_rest_suggest_novocab(app):
with app.app_context():
result = annif.rest.suggest(
'novocab',
text='example text',
limit=10,
threshold=0.0)
assert result.status_code == 503
def test_rest_suggest_with_notations(app):
with app.app_context():
result = annif.rest.suggest(
'dummy-fi',
text='example text',
limit=10,
threshold=0.0)
assert result['results'][0]['notation'] is None
def test_rest_learn_empty(app):
with app.app_context():
response = annif.rest.learn('dummy-en', [])
assert response == (None, 204) # success, no output
def test_rest_learn(app):
documents = [{'text': 'the quick brown fox',
'subjects': [{'uri': 'http://example.org/fox',
'label': 'fox'}]}]
with app.app_context():
response = annif.rest.learn('dummy-en', documents)
assert response == (None, 204) # success, no output
result = annif.rest.suggest(
'dummy-en',
text='example text',
limit=10,
threshold=0.0)
assert 'results' in result
assert result['results'][0]['uri'] == 'http://example.org/fox'
assert result['results'][0]['label'] == 'fox'
def test_rest_learn_novocab(app):
with app.app_context():
result = annif.rest.learn('novocab', [])
assert result.status_code == 503
def test_rest_learn_nonexistent(app):
with app.app_context():
result = annif.rest.learn('nonexistent', [])
assert result.status_code == 404
def test_rest_learn_not_supported(app):
with app.app_context():
result = annif.rest.learn('tfidf-fi', [])
assert result.status_code == 503
```
#### File: Annif/tests/test_transform_langfilter.py
```python
import pytest
import annif.transform
pytest.importorskip("annif.transform.langfilter")
def test_lang_filter(project):
transf = annif.transform.get_transform("filter_lang", project)
text = """
Kansalliskirjasto on kaikille avoin kulttuuriperintöorganisaatio, joka
palvelee valtakunnallisesti kansalaisia, tiedeyhteisöjä ja muita
yhteiskunnan toimijoita.
The National Library of Finland is the oldest and largest scholarly
library in Finland. It is responsible for the collection, description,
preservation and accessibility of Finland’s published national heritage
and the unique collections under its care.
Nationalbiblioteket är Finlands största och äldsta vetenskapliga
bibliotek, som ansvarar för utökning, beskrivning, förvaring och
tillhandahållande av vårt nationella publikationsarv och av sina unika
samlingar.
Abc defghij klmnopqr stuwxyz abc defghij klmnopqr stuwxyz.
Turvaamme Suomessa julkaistun tai Suomea koskevan julkaistun
kulttuuriperinnön saatavuuden sekä välittämme ja tuotamme
tietosisältöjä tutkimukselle, opiskelulle, kansalaisille ja
yhteiskunnalle. Kehitämme palveluja yhteistyössä kirjastojen,
arkistojen, museoiden ja muiden toimijoiden kanssa.
"""
text = ' '.join(text.split())
text_filtered = """
Kansalliskirjasto on kaikille avoin kulttuuriperintöorganisaatio, joka
palvelee valtakunnallisesti kansalaisia, tiedeyhteisöjä ja muita
yhteiskunnan toimijoita.
Abc defghij klmnopqr stuwxyz abc defghij klmnopqr stuwxyz.
Turvaamme Suomessa julkaistun tai Suomea koskevan julkaistun
kulttuuriperinnön saatavuuden sekä välittämme ja tuotamme
tietosisältöjä tutkimukselle, opiskelulle, kansalaisille ja
yhteiskunnalle. Kehitämme palveluja yhteistyössä kirjastojen,
arkistojen, museoiden ja muiden toimijoiden kanssa.
"""
text_filtered = ' '.join(text_filtered.split())
assert transf.transform_text(text) == text_filtered
def test_lang_filter_text_min_length(project):
text = "This is just some non-Finnish text of 52 characters."
transf = annif.transform.get_transform("filter_lang", project)
assert transf.transform_text(text) == text
# Set a short text_min_length to apply language filtering:
transf = annif.transform.get_transform(
"filter_lang(text_min_length=50)", project)
assert transf.transform_text(text) == ""
def test_lang_filter_sentence_min_length(project):
text = "This is a non-Finnish sentence of 42 chars. And this of 20 chars."
transf = annif.transform.get_transform(
"filter_lang(text_min_length=50)", project)
assert transf.transform_text(text) == text
# Set a short sentence_min_length to apply language filtering:
transf = annif.transform.get_transform(
"filter_lang(text_min_length=50,sentence_min_length=30)", project)
assert transf.transform_text(text) == "And this of 20 chars."
``` |
{
"source": "jimfhahn/ils-middleware",
"score": 2
} |
#### File: tasks/folio/login.py
```python
import json
from ils_middleware.tasks.folio.request import FolioRequest
from airflow.providers.http.operators.http import SimpleHttpOperator
def FolioLogin(**kwargs) -> SimpleHttpOperator:
"""Logs into FOLIO and returns Okapi token."""
username = kwargs.get("username")
password = kwargs.get("password")
return FolioRequest(
**kwargs,
task_id="folio_login",
endpoint="authn/login",
response_filter=lambda response: response.json().get("x-okapi-token"),
data=json.dumps({"username": username, "password": password}),
)
```
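A hedged example of wiring the login task into a DAG; the connection id, tenant and credentials are placeholders, chosen to mirror the kwargs exercised in the tests further below.
```python
# Placeholder values throughout -- illustrative wiring only.
from datetime import datetime
from airflow import DAG

dag = DAG("folio_example",
          default_args={"owner": "airflow", "start_date": datetime(2021, 9, 20)})
login_task = FolioLogin(
    conn_id="folio_dev_login",
    tenant="diku",        # forwarded to FolioRequest's x-okapi-tenant header
    username="DEVSYS",
    password="secret",
    dag=dag,
)
```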
#### File: tasks/folio/request.py
```python
from airflow.providers.http.operators.http import SimpleHttpOperator
def FolioRequest(**kwargs) -> SimpleHttpOperator:
tenant = kwargs.get("tenant")
conn_id = kwargs.get("conn_id")
dag = kwargs.get("dag")
token = kwargs.get("token")
response_filter = kwargs.get("response_filter")
method = kwargs.get("method")
endpoint = kwargs.get("endpoint")
data = kwargs.get("data")
task_id = kwargs.get("task_id", "folio_request")
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"x-okapi-tenant": tenant,
}
if token:
headers["x-okapi-token"] = token
return SimpleHttpOperator(
task_id=task_id,
http_conn_id=conn_id,
method=method,
headers=headers,
data=data,
endpoint=endpoint,
dag=dag,
response_filter=response_filter,
)
```
#### File: tasks/symphony/login.py
```python
import json
from ils_middleware.tasks.symphony.request import SymphonyRequest
from airflow.providers.http.operators.http import SimpleHttpOperator
def SymphonyLogin(**kwargs) -> SimpleHttpOperator:
login = kwargs.get("login")
password = kwargs.get("password")
return SymphonyRequest(
**kwargs,
task_id="login_symphony",
data=json.dumps({"login": login, "password": password}),
headers={"Content-Type": "application/json", "Accept": "application/json"},
endpoint="user/staff/login",
filter=lambda response: response.json().get("sessionToken")
)
```
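The Symphony variant follows the same pattern; the sketch below uses placeholder credentials and assumes `SymphonyRequest` accepts a `conn_id` the way the FOLIO wrapper does (the tests further below pass one).
```python
# Placeholder values throughout -- illustrative wiring only.
from datetime import datetime
from airflow import DAG

dag = DAG("symphony_example",
          default_args={"owner": "airflow", "start_date": datetime(2021, 9, 20)})
login_task = SymphonyLogin(
    conn_id="symphony_dev_login",
    login="circ_user",
    password="secret",
    dag=dag,
)
```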
#### File: tasks/amazon/test_sqs.py
```python
import pytest
from datetime import datetime
from airflow import DAG
from airflow.models import Variable
from ils_middleware.tasks.amazon.sqs import SubscribeOperator
@pytest.fixture
def test_dag():
start_date = datetime(2021, 9, 16)
return DAG("test_dag", default_args={"owner": "airflow", "start_date": start_date})
@pytest.fixture
def mock_variable(monkeypatch):
def mock_get(key, default=None):
if key == "SQS_STAGE":
return "http://aws.com/12345/"
monkeypatch.setattr(Variable, "get", mock_get)
def test_subscribe_operator_missing_kwargs(test_dag, mock_variable):
"""Test missing kwargs for SubscribeOperator."""
task = SubscribeOperator(dag=test_dag)
assert task is not None
assert task.sqs_queue == "None"
assert task.aws_conn_id == "aws_sqs_dev"
def test_subscribe_operator(test_dag, mock_variable):
"""Test with typical kwargs for SubscribeOperator."""
task = SubscribeOperator(queue="stanford-ils", sinopia_env="stage", dag=test_dag)
assert task.sqs_queue.startswith("http://aws.com/12345/stanford-ils")
assert task.aws_conn_id == "aws_sqs_stage"
```
#### File: tasks/folio/test_login.py
```python
import pytest
from datetime import datetime
from airflow import DAG
from ils_middleware.tasks.folio.login import FolioLogin
@pytest.fixture
def test_dag():
start_date = datetime(2021, 9, 20)
return DAG("test_dag", default_args={"owner": "airflow", "start_date": start_date})
def test_subscribe_operator_missing_kwargs(test_dag):
"""Test missing kwargs for SubscribeOperator."""
task = FolioLogin(dag=test_dag)
assert task.http_conn_id is None
def test_subscribe_operator(test_dag):
"""Test with typical kwargs for SubscribeOperator."""
task = FolioLogin(
conn_id="folio_dev_login", username="DEVSYS", password="<PASSWORD>", dag=test_dag
)
assert task.http_conn_id.startswith("folio_dev_login")
assert "DEVSYS" in task.data
```
#### File: tasks/symphony/test_new.py
```python
from datetime import datetime
import pytest
from airflow import DAG
from ils_middleware.tasks.symphony.new import NewMARCtoSymphony
@pytest.fixture
def test_dag():
start_date = datetime(2021, 9, 20)
return DAG("test_dag", default_args={"owner": "airflow", "start_date": start_date})
def test_NewMARCtoSymphony(test_dag):
"""Tests NewMARCtoSymphony"""
task = NewMARCtoSymphony(
conn_id="symphony_dev_login",
session_token="<KEY>",
library_key="GREEN",
marc_json={"leader": "basdfdaf adf", "fields": [{"tag": "245"}]},
dag=test_dag,
)
assert task.http_conn_id.startswith("symphony_dev_login")
assert task.endpoint.startswith("catalog/bib")
assert "basdfdaf" in task.data
``` |
{
"source": "jimfinoc/superClock",
"score": 3
} |
#### File: jimfinoc/superClock/superClock.py
```python
import urllib
import urllib2
import sys
import json
import time
import datetime
import requests
from Adafruit_7SegmentPlus import SevenSegment
from optparse import OptionParser
import myColorText
import commands
# Make sure your higher level directory has the JSON file called passwordFile.json
# The file should contain the information in the JSON format. See below for an example
# {"username": "<EMAIL>", "password": "<PASSWORD>!!!"}
# all temps from the Nest site are stored in degrees Celsius
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) #for using PrintColor
fileData = open('../passwordFile.json')
usernameAndPassword = json.load(fileData)
valueTimeDate = None
#print "username:" + str(usernameAndPassword['username'])
#print "password:" + str(usernameAndPassword['password'])
print "Press CTRL+Z to exit"
class Zone(datetime.tzinfo):
def __init__(self,offset,isdst,name):
self.offset = offset
self.isdst = isdst
self.name = name
def utcoffset(self, dt):
return datetime.timedelta(hours=self.offset) + self.dst(dt)
def dst(self, dt):
return datetime.timedelta(hours=1) if self.isdst else datetime.timedelta(0)
def tzname(self,dt):
return self.name
GMT = Zone(0,False,'GMT')
# True if DST is on
# False if DST is off
EST = Zone(-5,True,'EST')
print datetime.datetime.utcnow().strftime('%m/%d/%Y %H:%M:%S %Z')
print datetime.datetime.now(GMT).strftime('%m/%d/%Y %H:%M:%S %Z')
print datetime.datetime.now(EST).strftime('%m/%d/%Y %H:%M:%S %Z')
def c_to_f(c):
return c * 9.0 / 5.0 + 32.0
class Nest:
def __init__(self, username, password, serial=None, index=0):
self.username = username
self.password = password
self.serial = serial
self.index = index
def loads(self, res):
if hasattr(json, "loads"):
res = json.loads(res)
else:
res = json.read(res)
return res
def login(self):
data = urllib.urlencode({"username": self.username, "password": self.password})
req = urllib2.Request("https://home.nest.com/user/login",
data,
{"user-agent":"Nest/1.1.0.10 CFNetwork/548.0.4"})
res = urllib2.urlopen(req).read()
res = self.loads(res)
self.transport_url = res["urls"]["transport_url"]
self.access_token = res["access_token"]
self.userid = res["userid"]
def get_status(self):
req = urllib2.Request(self.transport_url + "/v2/mobile/user." + self.userid,
headers={"user-agent":"Nest/1.1.0.10 CFNetwork/548.0.4",
"Authorization":"Basic " + self.access_token,
"X-nl-user-id": self.userid,
"X-nl-protocol-version": "1"})
res = urllib2.urlopen(req).read()
res = self.loads(res)
self.structure_id = res["structure"].keys()[0]
if (self.serial is None):
self.device_id = res["structure"][self.structure_id]["devices"][self.index]
self.serial = self.device_id.split(".")[1]
self.status = res
def show_status(self):
shared = self.status["shared"][self.serial]
device = self.status["device"][self.serial]
allvars = shared
allvars.update(device)
for k in sorted(allvars.keys()):
print k + "."*(32-len(k)) + ":", allvars[k]
# This assumes you have two Nest thermostats. If you have more than two, the index argument after "None"
# below increments accordingly. If you only have one, it should just be 0. You have to create an object
# for each Nest thermostat. You could also specify the thermostats by serial number instead of the index.
def displayTemperature(segment = SevenSegment(address=0x70), temperature = None):
"this will display the temperature on the specific segment"
segment.disp.clear()
if (temperature==None):
segment.disp.clear()
segment.writeDigit(4, 0xF)
return False
else:
segment.writeDigit(0, int(temperature) / 10) # Tens
segment.writeDigit(1, int(temperature) % 10, True) # Ones
        segment.writeDigit(3, int(temperature * 10) % 10) # Tenths
segment.writeDigit(4, 0xF) # F
return True
def displayHumidity(segment = SevenSegment(address=0x70), humidity = None):
"this will display the humidity on the specific segment"
segment.disp.clear()
if (humidity==None):
# segment.writeDigit(0, 0xF)
segment.setSpecialH(0)
return False
else:
segment.setSpecialH(0) # displays an H in the 0 position
# segment.writeDigit(1, int(temperature) % 10, True) # blank
segment.writeDigit(3, int(humidity) / 10) # Tens
segment.writeDigit(4, int(humidity) % 10) # Ones
return True
def displayTime(segment = SevenSegment(address=0x70),valueTimeDate = None):
"this will display the time on the specific segment"
segment.disp.clear()
if (valueTimeDate==None):
segment.disp.clear()
return False
else:
segment.writeDigit(0, int(valueTimeDate.strftime('%H')[0])) # Thousand
segment.writeDigit(1, int(valueTimeDate.strftime('%H')[1])) # Hundred
# segment.writeDigit(2, 0xFFFF) # turn on colon
segment.writeDigit(2, 0) # turn on colon
segment.writeDigit(3, int(valueTimeDate.strftime('%M')[0])) # Ten
segment.writeDigit(4, int(valueTimeDate.strftime('%M')[1])) # Ones
return True
def displayMonthDay(segment = SevenSegment(address=0x70),valueTimeDate = None):
"this will display the day and month on the specific segment"
if (valueTimeDate==None):
segment.disp.clear()
return False
else:
segment.writeDigit(0, int(valueTimeDate.strftime('%m')[0])) # month tens
segment.writeDigit(1, int(valueTimeDate.strftime('%m')[1]),True) # month plus a decimal
# segment.writeDigit(2, 0xFFFF) # turn off colon
segment.writeDigit(3, int(valueTimeDate.strftime('%d')[0])) # day tens
segment.writeDigit(4, int(valueTimeDate.strftime('%d')[1])) # day
return True
def displayYear(segment = SevenSegment(address=0x70),valueTimeDate = None):
"this will display the year on the specific segment"
# print valueTimeDate
if (valueTimeDate==None):
segment.disp.clear()
return False
else:
segment.writeDigit(0, int(valueTimeDate.strftime('%Y')[0])) # Thousand
segment.writeDigit(1, int(valueTimeDate.strftime('%Y')[1])) # Hundred
# segment.writeDigit(2, 0) # turn off colon
segment.writeDigit(3, int(valueTimeDate.strftime('%Y')[2])) # Ten
segment.writeDigit(4, int(valueTimeDate.strftime('%Y')[3])) # Ones
return True
def create_parser():
parser = OptionParser(usage="superClock [options] command [command_options] [command_args]",
description="Commands: help",
version="1")
# parser.add_option("-c", "--celsius", dest="celsius", action="store_true", default=False,
# help="use celsius instead of farenheit")
return parser
def help():
print "syntax: superClock [options]"
print "options:"
print " --celsius ... Celsius instead of Farenheit. not yet implemented."
print ""
print "commands: help, onetime, repeat"
print " help ... this menu"
print " onetime ... default command (with help if omited)"
print " repeat ... runs forever"
    # future development would allow a finite repeat number and a separate mode for continuous operation
print ""
print "examples:"
print " superClock.py help"
print " superClock.py onetime"
print " superClock.py repeat"
print ""
def main():
cmd = ""
parser = create_parser()
(opts, args) = parser.parse_args()
try:
# localip = socket.gethostbyname(socket.gethostname())
localip = commands.getoutput("hostname -I")
except:
localip = "No ip addr"
print localip
if (len(args)==0):
help()
cmd = "onetime"
else:
cmd = args[0]
print cmd
if (cmd=="help"):
help()
sys.exit(-1)
try:
print "Initalizing the displays"
segmentLevelBase = SevenSegment(address=0x70)
segmentLevelZero = SevenSegment(address=0x72)
segmentLevelOne = SevenSegment(address=0x74)
print " Setting brightness"
segmentLevelBase.disp.setBrightness(10)
segmentLevelZero.disp.setBrightness(10)
segmentLevelOne.disp.setBrightness(10)
except:
print "could not initalize the three seven segment displays"
sys.exit(-1)
print ""
print "Trying to get data from the Nest Web"
try:
print "My Nest Data"
n0 = Nest(usernameAndPassword['username'],usernameAndPassword['password'], None, 0) #Level Zero
n1 = Nest(usernameAndPassword['username'],usernameAndPassword['password'], None, 1) #Level One
print " Logging On"
n1.login()
n0.login()
except:
print " Nest.com failure"
loopingQuestion = True
while (loopingQuestion):
print ""
print "In the Loop"
print ""
print "Get the current Time"
valueTimeDate = datetime.datetime.now(EST)
print valueTimeDate
try:
print ""
print "Sending time data to the external displays"
displayTime(segmentLevelOne,valueTimeDate)
displayMonthDay(segmentLevelZero,valueTimeDate)
displayYear(segmentLevelBase, valueTimeDate)
print""
print "sleeping for 4 seconds"
time.sleep(4)
except:
print "cannot write time to sensors"
print ""
try:
print " Getting Status"
n1.get_status()
n0.get_status()
levelOneTemperature = int(c_to_f(n1.status["shared"][n1.serial]["current_temperature"]))
levelOneHumidity = n1.status["device"][n1.serial]["current_humidity"]
levelZeroTemperature = c_to_f(n0.status["shared"][n0.serial]["current_temperature"])
levelZeroHumidity = n0.status["device"][n0.serial]["current_humidity"]
except:
print " Nest.com failed. Setting Level's One and Zero to None"
levelOneTemperature = None
levelOneHumidity = None
levelZeroTemperature = None
levelZeroHumidity = None
print ""
print "Getting data from the internal web device"
try:
website = "http://10.0.1.214"
print " getting the data from the site: ", website
r = requests.get(website)
print " pulling values"
try:
levelBaseTemperature = float(r.json()["Temperature"])
except:
levelBaseTemperature = None
try:
levelBaseHumidity = float(r.json()["Humidity"])
except:
levelBaseHumidity = None
try:
levelBaseTime = str(r.json()["Local Time"])
except:
levelBaseTime = None
except:
print " error pulling data from ", website
try:
print ""
print "trying to use color output"
print "Level One Temperature"
myColorText.printColor(str(levelOneTemperature), YELLOW) #colors are for readability
print "Level One Humidity"
myColorText.printColor(str(levelOneHumidity), YELLOW) #colors are for readability
print ""
print "Level Zero Temperature"
myColorText.printColor(str(levelZeroTemperature), GREEN) #colors are for readability
print "Level Zero Humidity"
myColorText.printColor(str(levelZeroHumidity), GREEN) #colors are for readability
print ""
print "Level Base Time"
myColorText.printColor(str(levelBaseTime), RED) #colors are for readability
print "Level Base Temperature"
myColorText.printColor(str(levelBaseTemperature), RED) #colors are for readability
print "The value of the webpage temp"
myColorText.printColor(str(levelBaseHumidity), RED) #colors are for readability
print ""
except:
print " cannot print in color"
try:
print "sending temp data to the external displays"
displayTemperature(segmentLevelOne,levelOneTemperature)
displayTemperature(segmentLevelZero,levelZeroTemperature)
displayTemperature(segmentLevelBase, levelBaseTemperature)
print ""
print "sleeping for 4 more seconds"
time.sleep(4)
print "sending humid data to the external displays"
displayHumidity(segmentLevelOne,levelOneHumidity)
displayHumidity(segmentLevelZero,levelZeroHumidity)
displayHumidity(segmentLevelBase, levelBaseHumidity)
print "sleeping for another 4 seconds"
time.sleep(4)
except:
print "cannot write temp or humidity data to sensors"
try:
with open('/var/www/index.html', 'w') as f:
x = {"Local ip": localip,"Local Time": datetime.datetime.now(EST).strftime('%m/%d/%Y %H:%M:%S %Z') ,"Level One Temperature": levelOneTemperature,"Level One Humidity": levelOneHumidity,"Level Zero Temperature": levelZeroTemperature,"Level Zero Humidity": levelZeroHumidity,}
# x = {"Local ip": localip, 'Local Time' : datetime.datetime.now(EST).strftime('%m/%d/%Y %H:%M:%S %Z') , 'Temperature' : tempInF , "Location": "Basement"}
json.dump(x,f)
f.closed
except:
print "cannot open file at /var/www/index.html"
print ""
print "initial routine finished"
print ""
if (cmd=="repeat"):
loopingQuestion = True
else:
loopingQuestion = False
if __name__=="__main__":
main()
``` |
{
"source": "jimflores5/SigFigs",
"score": 3
} |
#### File: jimflores5/SigFigs/SigFigsMain.py
```python
import random
from flask import Flask, request, redirect, render_template, session, flash
import cgi
from StringSigFigs import MakeNumber, RoundValue, CheckAnswer, CheckRounding, ApplySciNotation
from CalcsWithSigFigs import addValues, subtractValues, multiplyValues, divideValues, findDecimalPlaces, addWithPlaceholders, subtractWithPlaceholders
app = Flask(__name__)
app.config['DEBUG'] = True
app.secret_key = 'yrtsimehc'
@app.route('/')
def index():
session.clear()
return render_template('index.html',title="Sig Fig Practice")
@app.route('/countingsf', methods=['POST', 'GET'])
def countingsf():
if request.method == 'POST':
answer = request.form['answer']
actualSigFigs = request.form['actualSigFigs']
value = request.form['value']
if answer==actualSigFigs:
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('countingSigFigs.html', value=value, sigFigs = actualSigFigs, answer = answer)
sigFigs = random.randrange(1,7)
power = random.randrange(-5,9)
value = MakeNumber(sigFigs,power)
return render_template('countingSigFigs.html',title="Counting Sig Figs", value=value, sigFigs = sigFigs)
@app.route('/roundingsf', methods=['POST', 'GET'])
def roundingsf():
if request.method == 'POST':
answer = request.form['answer']
origValue = request.form['value']
sigFigs = int(request.form['sigFigs'])
roundedValue = RoundValue(origValue, sigFigs)
if CheckAnswer(roundedValue, answer):
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('roundingSigFigs.html', value=origValue, sigFigs = sigFigs, answer = answer, roundedValue=roundedValue)
iffyValue = True
while iffyValue:
sigFigs = random.randrange(1,7)
power = random.randrange(-4,6)
value = MakeNumber(9,power)
result = RoundValue(value, sigFigs)
iffyValue = CheckRounding(result,sigFigs)
return render_template('roundingSigFigs.html',title="Rounding Sig Figs", value=value, sigFigs = sigFigs)
@app.route('/sfcalcs', methods=['POST', 'GET'])
def sfcalcs():
if request.method == 'POST':
answer = request.form['answer']
result = request.form['result']
value1 = request.form['value1']
value2 = request.form['value2']
operation = request.form['operation']
if CheckAnswer(result, answer):
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, result = result, answer = answer, operation=operation)
operators = ['+', '-', 'x', '/']
operation = random.randrange(4) #Randomly select +, -, * or / using integers 0 - 3, respectively.
if operation < 2: #For + and -, create 2 values between 0.001 and 90 with 1 - 6 sig figs.
iffyValue = True
while iffyValue:
sigFigs = random.randrange(1,7)
power = random.randrange(-3,2)
value = MakeNumber(sigFigs,power)
iffyValue = CheckRounding(value,sigFigs)
sigFigs1 = sigFigs
power1 = power
value1 = value
iffyValue = True
while iffyValue:
sigFigs = random.randrange(1,7)
power = random.randrange(-3,2)
value = MakeNumber(sigFigs,power)
iffyValue = CheckRounding(value,sigFigs)
sigFigs2 = sigFigs
power2 = power
value2 = value
else: #For * and /, create 2 values between 0.01 and 900 with 1 - 6 sig figs.
sigFigs1 = random.randrange(1,7)
power1 = random.randrange(-2,3)
value1 = MakeNumber(sigFigs1,power1)
sigFigs2 = random.randrange(1,7)
power2 = random.randrange(-2,3)
value2 = MakeNumber(sigFigs2,power2)
if operation == 0:
if (float(value1)>=10 and value1.find('.') == -1 and sigFigs1 < len(value1)) or (float(value2)>=10 and value2.find('.') == -1 and sigFigs2 < len(value2)):
result = addWithPlaceholders(value1,value2)
else:
result = addValues(value1,value2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
    elif operation == 1 and float(value1) >= float(value2):
if (float(value1)>=10 and value1.find('.') == -1 and sigFigs1 < len(value1)) or (float(value2)>=10 and value2.find('.') == -1 and sigFigs2 < len(value2)):
result = subtractWithPlaceholders(value1,value2)
else:
result = subtractValues(value1,value2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
elif operation == 1 and float(value1) < float(value2):
if (float(value1)>=10 and value1.find('.') == -1 and sigFigs1 < len(value1)) or (float(value2)>=10 and value2.find('.') == -1 and sigFigs2 < len(value2)):
result = subtractWithPlaceholders(value2,value1)
else:
result = subtractValues(value2,value1)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value2, value2 = value1, operation = operators[operation], result = result)
elif operation == 2:
result = multiplyValues(value1,sigFigs1,value2,sigFigs2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
elif float(value1)/float(value2)<1e-4:
result = divideValues(value2,sigFigs2,value1,sigFigs1)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value2, value2 = value1, operation = operators[operation], result = result)
else:
result = divideValues(value1,sigFigs1,value2,sigFigs2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
@app.route('/scinotation', methods=['POST', 'GET'])
def scinotation():
if request.method == 'POST':
sciNot = request.form['sciNot']
if sciNot=='True': #Given a value in sci notation, the user enters a number in standard notation.
answer = request.form['answer']
result = request.form['value']
sciValue = request.form['sciValue']
power = request.form['power']
if CheckAnswer(result, answer):
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('scientificNotation.html',title="Scientific Notation", value = result, sciValue=sciValue, power = power, sciNot = True, answer = answer)
else: #Given a value in standard notation, the user enters a number in sci notation.
answer = request.form['answer']
result = request.form['value']
sciValue = request.form['sciValue']
power = request.form['power']
exponent = request.form['exponent']
if CheckAnswer(power, exponent) and CheckAnswer(sciValue,answer):
flash('Correct! :-)', 'correct')
elif CheckAnswer(power, exponent) and not CheckAnswer(sciValue,answer):
flash('Correct power. Wrong decimal value.', 'error')
elif CheckAnswer(sciValue,answer) and not CheckAnswer(power, exponent):
flash('Correct decimal value. Wrong power.', 'error')
else:
flash('Both entries are incorrect. Try again, or click to reveal the answer.', 'error')
return render_template('scientificNotation.html',title="Scientific Notation", value = result, sciValue=sciValue, power = power, sciNot = False, answer = answer, exponent = exponent)
sigFigs = random.randrange(1,5)
power = random.randrange(-5,9)
value = MakeNumber(sigFigs,power)
sciValue = ApplySciNotation(value, sigFigs)
if random.randrange(2) == 0: #Flip a coin: If '0', ask the user to change sci notation into standard notation.
return render_template('scientificNotation.html',title="Scientific Notation", value = value, sciValue=sciValue, power = power, sciNot = True)
else: #Otherwise ('1'), ask the user to change standard notation into sci notation.
return render_template('scientificNotation.html',title="Scientific Notation", value=value, sciValue=sciValue, power = power, sciNot = False)
@app.route('/sftutorial1', methods=['POST', 'GET'])
def sftutorial1():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
else:
displayText=1
return render_template('sftutorial1.html',title="Sig Fig Tutorial", page = 1, displayText=displayText)
@app.route('/sftutorial2', methods=['POST', 'GET'])
def sftutorial2():
if request.method == 'POST':
firstZeroRule = request.form['firstZeroRule']
session['firstZeroRule'] = firstZeroRule
secondHalf = True
if firstZeroRule == '':
flash('Please enter a response.', 'error')
secondHalf = False
return render_template('sftutorial2.html', answer = firstZeroRule, page = 2, secondHalf = secondHalf)
return render_template('sftutorial2.html',title="Sig Fig Tutorial", page = 2, secondHalf=False)
@app.route('/sftutorial3', methods=['POST', 'GET'])
def sftutorial3():
if request.method == 'POST':
firstZeroRule = session.get('firstZeroRule', None)
secondZeroRule = request.form['secondZeroRule']
session['secondZeroRule'] = secondZeroRule
secondHalf = True
if secondZeroRule == '':
flash('Please enter a response.', 'error')
secondHalf = False
return render_template('sftutorial3.html', firstZeroRule = firstZeroRule, secondZeroRule = secondZeroRule, page = 3, secondHalf = secondHalf)
firstZeroRule = session.get('firstZeroRule', None)
return render_template('sftutorial3.html',title="Sig Fig Tutorial", page = 3, firstZeroRule = firstZeroRule, secondHalf=False)
@app.route('/sftutorial4', methods=['POST', 'GET'])
def sftutorial4():
firstZeroRule = session.get('firstZeroRule', None)
secondZeroRule = session.get('secondZeroRule', None)
return render_template('sftutorial4.html',title="Sig Fig Tutorial", page = 4, firstZeroRule=firstZeroRule, secondZeroRule=secondZeroRule)
@app.route('/sftutorial5', methods=['POST', 'GET'])
def sftutorial5():
return render_template('sftutorial5.html',title="Sig Fig Tutorial", page = 5)
@app.route('/roundingtutorial1', methods=['POST', 'GET'])
def roundingtutorial1():
return render_template('roundingtutorial1.html',title="Rounding Tutorial", page = 1)
@app.route('/roundingtutorial2', methods=['POST', 'GET'])
def roundingtutorial2():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
roundedAnswer = request.form['5SigFigs']
answers = []
numCorrect = 0
if displayText == 4 and roundedAnswer != '12.386':
flash('Not quite correct. Try again.', 'error')
displayText = 3
elif displayText>5:
correctAnswers = ['0.00798','0.0080','0.008']
for x in range(3):
answers.append(request.form[str(3-x)+'SigFigs'])
if CheckAnswer(correctAnswers[x],answers[x]):
flash('Correct! :-)', 'correct')
numCorrect += 1
else:
flash('Try again.', 'error')
else:
displayText=1
roundedAnswer = ''
answers = []
numCorrect = 0
return render_template('roundingtutorial2.html',title="Rounding Tutorial", page = 2, displayText=displayText, roundedAnswer = roundedAnswer, answers = answers, numCorrect = numCorrect)
@app.route('/roundingtutorial3', methods=['POST', 'GET'])
def roundingtutorial3():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
example3 = request.form['example3']
answers = []
numCorrect = 0
if displayText == 2 and example3 != '2380':
flash('Not quite correct. Try again.', 'error')
displayText = 1
elif displayText > 3:
correctAnswers = ['0.0998','0.10','0.1']
for x in range(3):
answers.append(request.form[str(3-x)+'SigFigs'])
if CheckAnswer(correctAnswers[x],answers[x]):
flash('Correct! :-)', 'correct')
numCorrect += 1
else:
flash('Try again.', 'error')
else:
displayText=1
example3 = ''
answers = []
numCorrect = 0
return render_template('roundingtutorial3.html',title="Rounding Tutorial", page = 3, displayText=displayText, answers = answers, example3 = example3, numCorrect = numCorrect)
@app.route('/roundingtutorial4', methods=['POST', 'GET'])
def roundingtutorial4():
return render_template('roundingtutorial4.html',title="Rounding Tutorial", page = 4)
@app.route('/scinottutorial1', methods=['POST', 'GET'])
def scinottutorial1():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
if displayText == 2:
decimal = request.form['decimal']
power = request.form['exponent']
decimals = ['1.5', '15', '150', '1500']
powers = ['3','2','1','0']
if decimal in decimals:
index = decimals.index(decimal)
if power != powers[index]:
flash('Incorrect power. Try again.', 'error')
displayText = 1
else:
flash('Incorrect decimal value. Try again.', 'error')
displayText = 1
else:
decimal = ''
power = ''
else:
displayText=1
decimal = ''
power = ''
return render_template('scinottutorial1.html',title="Scientific Notation Tutorial", page = 1, displayText = displayText, decimal = decimal, exponent=power)
@app.route('/scinottutorial2', methods=['POST', 'GET'])
def scinottutorial2():
if request.method == 'POST':
decimals = []
powers = []
exponents = []
values = []
sciValues = []
numCorrect = 0
for item in range(4):
decimals.append(request.form['decimal'+str(item)])
exponents.append(request.form['exponent'+str(item)])
values.append(request.form['value'+str(item)])
powers.append(request.form['power'+str(item)])
sciValues.append(request.form['sciValue'+str(item)])
if CheckAnswer(powers[item], exponents[item]) and CheckAnswer(sciValues[item],decimals[item]):
flash('Correct! :-)', 'correct')
numCorrect += 1
elif CheckAnswer(powers[item], exponents[item]) and not CheckAnswer(sciValues[item],decimals[item]):
flash('Correct power. Wrong decimal value.', 'error')
elif CheckAnswer(sciValues[item],decimals[item]) and not CheckAnswer(powers[item], exponents[item]):
flash('Correct decimal value. Wrong power.', 'error')
else:
flash('Both entries are incorrect. Try again.', 'error')
else:
values = []
sciValues = []
powers = []
decimals = []
exponents = []
numCorrect = 0
for item in range(4):
sigFigs = random.randrange(1,5)
if item <= 1:
power = random.randrange(0,7)
else:
power = random.randrange(-5,0)
value = MakeNumber(sigFigs,power)
values.append(value)
powers.append(power)
sciValues.append(ApplySciNotation(value, sigFigs))
return render_template('scinottutorial2.html',title="Scientific Notation Tutorial", page = 2, values = values, decimals = decimals, exponents = exponents, sciValues = sciValues, powers = powers, numCorrect = numCorrect)
@app.route('/scinottutorial3', methods=['POST', 'GET'])
def scinottutorial3():
if request.method == 'POST':
values = []
sciValues = []
powers = []
answers = []
numCorrect = 0
for item in range(4):
answers.append(request.form['answer'+str(item)])
values.append(request.form['value'+str(item)])
powers.append(request.form['power'+str(item)])
sciValues.append(request.form['sciValue'+str(item)])
if ',' in answers[item]:
flash('Please remove the comma(s) from your answer.', 'error')
elif CheckAnswer(values[item], answers[item]):
flash('Correct! :-)', 'correct')
numCorrect += 1
else:
flash('Oops! Try again.', 'error')
else:
values = []
sciValues = []
powers = []
answers = []
numCorrect = 0
for item in range(4):
sigFigs = random.randrange(1,5)
if item <= 1:
power = random.randrange(0,7)
else:
power = random.randrange(-5,0)
powers.append(power)
values.append(MakeNumber(sigFigs,power))
sciValues.append(ApplySciNotation(values[item], sigFigs))
return render_template('scinottutorial3.html',title="Scientific Notation Tutorial", page = 3, values = values, answers = answers, sciValues = sciValues, powers = powers, numCorrect = numCorrect)
@app.route('/scinottutorial4', methods=['POST', 'GET'])
def scinottutorial4():
return render_template('scinottutorial4.html',title="Scientific Notation Tutorial", page = 4)
@app.route('/sfcalcstutorial1', methods=['POST', 'GET'])
def sfcalcstutorial1():
if request.method == 'POST':
imageText = ['Assume two students measure the length of a small tile. Their results are not the same, but the difference is small in this case.',
'To predict the length of two tiles, the students simply double their measurements. Note that the difference (uncertainty) in their results is LARGER than before.',
'For five tiles, something interesting happens with the error in the predicted lengths.',
'The difference between the results becomes too large to keep 2 decimal places, so the guess digit moves into the tenths place.',
'What if each student calculated the area of the tile?','Multiplying measurements also increases error.','Since two digits are now uncertain, we must round each answer to maintain a single guess digit.']
displayText = int(request.form['displayText']) + 1
imageName = 'SFCalcs'+str(displayText-1)+'.png'
return render_template('sfcalcstutorial1.html',title="Calculations with Sig Figs Tutorial", page = 1, displayText = displayText, imageText = imageText, imageName = imageName)
displayText = 1
return render_template('sfcalcstutorial1.html',title="Calculations with Sig Figs Tutorial", page = 1, displayText = displayText)
@app.route('/sfcalcstutorial2', methods=['POST', 'GET'])
def sfcalcstutorial2():
if request.method == 'POST':
answers = []
results = []
values = []
numCorrect = 0
for item in range(4):
values.append(request.form['value'+str(item)])
if item < 2:
answers.append(request.form['answer'+str(item)])
results.append(request.form['result'+str(item)])
if CheckAnswer(results[item], answers[item]):
flash('Correct! :-)', 'correct')
numCorrect += 1
else:
flash('Try again, or click to see the answer.', 'error')
return render_template('sfcalcstutorial2.html',title="Calculations with Sig Figs Tutorial", page = 2, values = values, answers = answers, results = results, numCorrect = numCorrect)
else:
numCorrect = 0
answers = []
sigFigs = []
powers = []
values = []
results = []
for index in range(4):
sigFigs.append(random.randrange(1,7))
powers.append(random.randrange(-2,3))
values.append(MakeNumber(sigFigs[index],powers[index]))
flip = False
if index == 1:
results.append(multiplyValues(values[index-1],sigFigs[index-1],values[index],sigFigs[index]))
elif index == 3 and float(values[index-1])/float(values[index])<1e-4:
temp = values[index-1]
values[index-1]=values[index]
values[index]=temp
results.append(divideValues(values[index-1],sigFigs[index-1],values[index],sigFigs[index]))
elif index == 3:
results.append(divideValues(values[index-1],sigFigs[index-1],values[index],sigFigs[index]))
return render_template('sfcalcstutorial2.html',title="Calculations with Sig Figs Tutorial", page = 2, values = values, sigFigs = sigFigs, powers = powers, answers = answers, flip = flip, results = results, numCorrect = numCorrect)
@app.route('/sfcalcstutorial3', methods=['POST', 'GET'])
def sfcalcstutorial3():
if request.method == 'POST':
answers = []
results = []
values = []
numCorrect = 0
for item in range(4):
values.append(request.form['value'+str(item)])
if item < 2:
answers.append(request.form['answer'+str(item)])
results.append(request.form['result'+str(item)])
if CheckAnswer(results[item], answers[item]):
flash('Correct! :-)', 'correct')
numCorrect += 1
else:
flash('Try again, or click to see the answer.', 'error')
return render_template('sfcalcstutorial3.html',title="Calculations with Sig Figs Tutorial", page = 3, values = values, answers = answers, results = results, numCorrect = numCorrect)
else:
numCorrect = 0
answers = []
sigFigs = []
powers = []
values = []
results = []
for index in range(4):
iffyValue = True
while iffyValue:
sigFig = random.randrange(1,7)
power = random.randrange(-3,2)
value = MakeNumber(sigFig,power)
iffyValue = CheckRounding(value,sigFig)
sigFigs.append(sigFig)
powers.append(power)
values.append(value)
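# Use the placeholder-aware addition when either value is a whole number of 10 or more
# whose significant figures stop short of its last digits (i.e. it ends in placeholder
# zeros); the same check guards the subtraction pair further below.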
if (float(values[0])>=10 and values[0].find('.') == -1 and sigFigs[0] < len(values[0])) or (float(values[1])>=10 and values[1].find('.') == -1 and sigFigs[1] < len(values[1])):
results.append(addWithPlaceholders(values[0],values[1]))
else:
results.append(addValues(values[0],values[1]))
if float(values[2]) < float(values[3]):
values[2],values[3] = values[3],values[2]
sigFigs[2],sigFigs[3] = sigFigs[3],sigFigs[2]
if (float(values[2])>=10 and values[2].find('.') == -1 and sigFigs[2] < len(values[2])) or (float(values[3])>=10 and values[3].find('.') == -1 and sigFigs[3] < len(values[3])):
results.append(subtractWithPlaceholders(values[2],values[3]))
else:
results.append(subtractValues(values[2],values[3]))
return render_template('sfcalcstutorial3.html',title="Calculations with Sig Figs Tutorial", page = 3, values = values, sigFigs = sigFigs, powers = powers, answers = answers, results = results, numCorrect = numCorrect)
@app.route('/sfcalcstutorial4', methods=['POST', 'GET'])
def sfcalcstutorial4():
if request.method == 'POST':
displayText = int(request.form['displayText'])
response = int(request.form['response'])
example = int(request.form['example'])
if displayText == 0 and response < 2:
answer = request.form['answer']
if example == 0 and answer=='3':
response += 1
else:
flash('This is NOT a trick question. Count again...', 'error')
elif displayText <= 2 and response < 1:
response += 1
example += 1
else:
response = 0
example += 1
displayText += 1
else:
displayText = 0
example = 0
response = 0
return render_template('sfcalcstutorial4.html',title="Calculations with Sig Figs Tutorial", page = 4, displayText = displayText, example = example, response = response)
if __name__ == '__main__':
app.run()
``` |
{
"source": "jimfmunro/AdventureGame",
"score": 3
} |
#### File: jimfmunro/AdventureGame/Game.py
```python
import json
import sqlite3
import cmd
import textwrap
import shutil
import tempfile
def get_room(id, dbfile='rooms.sqlite'):
ret = None
con = sqlite3.connect(dbfile)
rows = []
try:
rows = con.execute("select json from rooms where id=?", (id,))
except Exception as e:
print('DB ERROR', e)
for row in rows:
jsontext = row[0]
d = json.loads(jsontext)
d['id'] = id
ret = Room(**d)
break
con.close()
return ret
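# Illustrative note (an assumption about the data, not shipped with the repo): each row in
# the rooms table is expected to hold a JSON blob whose keys match Room's constructor, e.g.
#   {"name": "Entrance", "description": "A dusty hallway.", "neighbors": {"n": 2, "up": 3}}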
class Room():
def __init__(self, id=0, name="A Room", description="An empty room", neighbors={}):
self.id = id
self.name = name
self.description = description
self.neighbors = neighbors
def _neighbor(self, direction):
if direction in self.neighbors:
return self.neighbors[direction]
else:
return None
def north(self):
return self._neighbor('n')
def south(self):
return self._neighbor('s')
def east(self):
return self._neighbor('e')
def west(self):
return self._neighbor('w')
class Game(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.dbfile = 'rooms.sqlite' #tempfile.mktemp()
#shutil.copyfile("rooms.sqlite", self.dbfile)]
self.loc = get_room(1, self.dbfile)
self.look()
def move(self, dir):
newroom = self.loc._neighbor(dir)
if newroom is None:
print("you can't go that way")
else:
self.loc = get_room(newroom, self.dbfile)
self.look()
if newroom==13:
exit()
def look(self):
print('### %s ###' % self.loc.name)
print("")
for line in textwrap.wrap(self.loc.description, 72):
print(line)
def do_up(self, args):
"""Go up"""
self.move('up')
def do_down(self, args):
"""Go down"""
self.move('down')
def do_n(self, args):
"""Go north"""
self.move('n')
def do_s(self, args):
"""Go south"""
self.move('s')
def do_e(self, args):
"""Go east"""
self.move('e')
def do_w(self, args):
"""Go west"""
self.move('w')
def do_quit(self, args):
"""Leaves the game"""
print("Thank you for playing")
return True
def do_save(self, args):
"""Saves the game"""
shutil.copyfile(self.dbfile, args)
print("The game was saved to {0}".format(args))
if __name__ == "__main__":
g = Game()
g.cmdloop()
``` |
{
"source": "jimfmunro/cookiecutter-django",
"score": 3
} |
#### File: {{cookiecutter.repo_name}}/users/test_models.py
```python
from django.core.urlresolvers import reverse
from django.test import TestCase
from .models import User
class ModelTestCase(TestCase):
def test_model_create_admin(self):
self.admin_user = User.objects.create(email='<EMAIL>',
password="<PASSWORD>", is_superuser=True)
self.non_admin_user = User.objects.create(email='<EMAIL>',
password="<PASSWORD>", is_superuser=False)
self.assertTrue(self.admin_user.is_superuser)
self.assertFalse(self.non_admin_user.is_superuser)
``` |
{
"source": "jimforit/lagou",
"score": 2
} |
#### File: apps/enterprice/models.py
```python
from django.db import models
from db.base_model import BaseModel
from user.models import User
# Create your models here.
class EnterPrice(BaseModel):
id = models.AutoField('企业ID',primary_key=True)
name = models.CharField('企业名称',max_length=50)
product_desc = models.TextField('产品介绍')
enterprice_desc = models.TextField('产品介绍')
enterprice_gm = models.CharField(max_length=20, default="50-99人")
enterprice_type = models.CharField(max_length=20,verbose_name="所属行业",default='移动互联网')
finance_stage = models.CharField(max_length=10, verbose_name="融资阶段", default="不需要融资")
logo = models.ImageField('logo',upload_to='enterprice/%Y/%m',max_length=100)
address = models.CharField('企业地址',max_length=150,)
city = models.CharField('城市',max_length=20,)
user = models.ForeignKey(User,verbose_name="用户",on_delete=models.CASCADE)
class Meta:
verbose_name = '企业'
verbose_name_plural = verbose_name
def get_position_nums(self):
# Return the number of job positions this company has published
return self.position_set.all().count()
def __str__(self):
return self.name
```
#### File: apps/interview/models.py
```python
from django.db import models
from db.base_model import BaseModel
from job.models import Position
from enterprice.models import EnterPrice
from delivery.models import Delivery
from user.models import User
# Create your models here.
class Interview(BaseModel):
interview_status_enum=(
('YM','已面试'),
('JM','将面试'),
('LY', '录用'),
('WLY', '未录用'),
)
id = models.AutoField('面试ID',primary_key=True)
position = models.ForeignKey(Position,verbose_name = '职位',on_delete=models.CASCADE)
enterprice = models.ForeignKey(EnterPrice,verbose_name = '企业',on_delete=models.CASCADE)
user = models.ForeignKey(User,verbose_name = '候选人',on_delete=models.CASCADE)
delivery = models.ForeignKey(Delivery,verbose_name = '投递',on_delete=models.CASCADE)
interview_arrangement = models.TextField('面试安排')
interview_status = models.CharField('面试状态',choices=interview_status_enum,max_length=2,default='JM')
class Meta:
verbose_name = '面试'
verbose_name_plural = verbose_name
def __str__(self):
return "{0} - {1}".format(self.enterprice, self.position)
```
#### File: apps/interview/views.py
```python
from django.shortcuts import render,redirect,reverse
from django.views import View
from db.login_mixin import LoginRequiredMixin
from resume.models import Resume
from interview.models import Interview
from job.models import Position
# Create your views here.
class MyInterviewView(LoginRequiredMixin,View):
'''Interview invitations'''
def get(self,request):
interview_ym_list = Interview.objects.filter(interview_status="YM",user=request.user)
interview_jm_list = Interview.objects.filter(interview_status="JM", user=request.user)
return render(request, 'my_interviews.html',{"interview_ym_list":interview_ym_list,"interview_jm_list":interview_jm_list})
class InterviewView(LoginRequiredMixin,View):
'''Interview invitation letter'''
def get(self,request,resume_id):
resume = Resume.objects.get(id=resume_id)
position_list = Position.objects.filter(enterprice_id=request.user.enterprice_set.all()[0].id)
return render(request, 'interview.html',{"resume":resume,"position_list":position_list})
class InvitationView(LoginRequiredMixin,View):
'''Invitation'''
def get(self,request):
if request.user.role.name == "recruiter":
return render(request, 'profile.html')
else:
resume = Resume.objects.get(user_id=request.user.id)
if resume.is_public == 0:
block = "block"
none = "none"
class_name = "plus open"
else:
block = "none"
none = "block"
class_name = "plus"
return render(request, 'invitation.html',{"block":block,"none":none,"class_name":class_name})
class InterviewRecordView(LoginRequiredMixin,View):
'''Interview invitations'''
def get(self,request):
interview_jm_list = Interview.objects.filter(interview_status="JM",enterprice=request.user.enterprice_set.all()[0])
resume_list = []
for applier in interview_jm_list:
resume = applier.user.resume_set.all()[0]
resume_list.append(resume)
interview_ym_list = Interview.objects.filter(interview_status="YM",enterprice=request.user.enterprice_set.all()[0])
for applier in interview_ym_list:
resume = applier.user.resume_set.all()[0]
resume_list.append(resume)
return render(request, 'interviews_record.html',{"interview_jm_list":interview_jm_list,"interview_ym_list":interview_ym_list,"resume_list":resume_list})
class InterviewResultView(LoginRequiredMixin,View):
'''Interview result'''
def get(self,request,interview_id):
interview_detail = Interview.objects.get(id=interview_id)
resume_info = interview_detail.user.resume_set.all()[0]
return render(request, 'interview_result.html',{"interview_detail":interview_detail,"resume_info":resume_info})
class InterviewDetailView(LoginRequiredMixin,View):
'''Interview detail page'''
def get(self,request):
return render(request, 'interview_detail.html')
class InterviewCommentView(LoginRequiredMixin,View):
'''Interview feedback'''
def get(self,request):
return render(request, 'interview_comment.html')
```
#### File: apps/offer/views.py
```python
from django.shortcuts import render
from django.views import View
from db.login_mixin import LoginRequiredMixin
from interview.models import Interview
# Create your views here.
class OfferView(LoginRequiredMixin,View):
'''Offer submission page'''
def get(self,request,interview_id):
interview = Interview.objects.get(id=interview_id)
resume = interview.user.resume_set.all()[0]
return render(request, 'offer.html',{"interview":interview,"resume":resume})
class OfferDetailView(LoginRequiredMixin,View):
'''招聘需求页面'''
def get(self,request):
return render(request, 'offer_detail.html')
```
#### File: apps/user/views.py
```python
from django.shortcuts import render
from django.views import View
from .forms import RegisterForm,LoginForm
from .models import User
from .models import Role
from django.contrib.auth.hashers import make_password
from django.contrib.auth import authenticate,login,logout
from db.login_mixin import LoginRequiredMixin
# Create your views here.
class LoginView(View):
'''User login'''
def get(self,request):
if request.user.is_authenticated:
if request.user.nick_name:
return render(request, "profile.html", {"user": request.user})
else:
return render(request, "role.html", {"user": request.user})
return render(request, 'login.html')
def post(self,request):
form = LoginForm(request.POST)
if form.is_valid():
email = form.cleaned_data["email"]
password = form.cleaned_data["password"]
user = authenticate(username=email,password=password)
if user is not None:
login(request,user)
if request.user.nick_name:
return render(request, 'profile.html',{"user":user})
else:
return render(request, 'role.html')
else:
return render(request, 'login.html', {'message': '用户名或密码错误'})
else:
return render(request, 'login.html', {'login_form': form})
class RegisterView(View):
'''User registration'''
def post(self, request):
form = RegisterForm(request.POST)
if form.is_valid():
user = User.objects.filter(email=request.POST.get('email', ''))
if user:
message="邮箱已经注册"
return render(request, 'register.html', {'message': message})
else:
new_user = User()
new_user.email = form.cleaned_data["email"]
new_user.password = make_password(form.cleaned_data["password"])
new_user.username = new_user.email
role = Role.objects.filter(name="user").first()
if role is not None:
new_user.role = role
else:
role = Role()
role.name = "user"
role.save()
new_user.role_id = role.id
new_user.save()
else:
msg ={}
if "email" in form.errors.keys():
msg={"message1":"请输入有效的电邮地址"}
if "password" in form.errors.keys():
msg.update({"message2":"密码不能为空"})
print(msg)
return render(request, 'register.html', {'register_form': msg})
return render(request, 'login.html', {'message': "注册成功,请登录"})
def get(self, request):
form = RegisterForm()
return render(request, 'register.html', {"form": form})
class UserView(View):
'''User login'''
def get(self,request):
return render(request, 'profile.html')
def post(self,request):
return render(request,'register.html')
class LagouView(View):
'''User login'''
def get(self,request):
return render(request, 'lagou.html')
class LogoutView(View):
'''User logout'''
def get(self,request):
logout(request)
return render(request, 'login.html')
class PofileView(LoginRequiredMixin,View):
'''Personal information'''
def get(self,request):
return render(request, 'profile.html')
class RoleView(LoginRequiredMixin,View):
'''Role selection'''
def get(self,request):
return render(request,"role.html")
``` |
{
"source": "jim-ftw/hip-trebek",
"score": 3
} |
#### File: jim-ftw/hip-trebek/test_trebek.py
```python
import os
import unittest
import json
import trebek
import entities
import fakeredis
import time
import datetime
# Reference this SO post on getting distances between strings:
# http://stackoverflow.com/a/1471603/98562
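# Hedged sketch (an assumption, not code from trebek.py): a ratio-based fuzzy comparison
# in the spirit of that SO answer could look like the helper below. The name
# _roughly_equal and the 0.6 threshold are illustrative only.
import difflib
def _roughly_equal(expected, given, threshold=0.6):
    # SequenceMatcher yields a similarity ratio in [0, 1]; higher means closer strings.
    return difflib.SequenceMatcher(None, expected.lower(), given.lower()).ratio() >= threshold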
def get_clue_json():
with open('test-json-output.json') as json_data:
clue = json.load(json_data)
return clue
def fake_fetch_random_clue():
return entities.Question(**get_clue_json())
def fake_get_year_month():
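# Pretend a month has passed: build the "YYYY-MM" string for the month after the current
# one, so the monthly score-reset behaviour can be exercised by the tests below.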
now = datetime.datetime.now()
year, month = divmod(now.month + 1, 12)
if month == 0:
month = 12
year = year -1
next_month = datetime.datetime(now.year + year, month, 1)
return "{0}-{1}".format(next_month.year, str(next_month.month).zfill(2))
_fetch_count = 0
_invalid_clue = None
def fetch_invalid_clue():
global _fetch_count, _invalid_clue
clue = get_clue_json()
if _fetch_count == 0:
clue = _invalid_clue
_fetch_count += 1
return entities.Question(**clue)
class TestTrebek(unittest.TestCase):
def setUp(self):
d = self.get_setup_json()
self.room_message = entities.HipChatRoomMessage(**d)
self.trebek_bot = self.create_bot_with_dictionary(d)
def tearDown(self):
self.trebek_bot.redis.flushall()
def get_setup_json(self):
with open('test-room-message.json') as data:
d = json.load(data)
return d
def create_bot_with_dictionary(self, room_dictionary):
bot = trebek.Trebek(entities.HipChatRoomMessage(**room_dictionary))
bot.redis = fakeredis.FakeStrictRedis()
bot.fetch_random_clue = fake_fetch_random_clue
return bot
def create_user_scores(self, bot = None):
if bot != None:
r = bot.redis
else:
r = self.trebek_bot.redis
bot = self.trebek_bot
hipchat = trebek.Trebek.hipchat_user_key
r.set(hipchat.format(1), 'Aaron')
r.set(hipchat.format(2), 'Allen')
r.set(hipchat.format(3), 'Cordarrell')
r.set(hipchat.format(4), 'Melvin')
r.set(hipchat.format(5), 'Mark')
r.set(hipchat.format(6), 'Richard')
r.set(hipchat.format(7), '<NAME>')
r.set(hipchat.format(8), 'Arian')
r.set(hipchat.format(9), 'Zach')
r.set(hipchat.format(10), '<NAME>')
r.set(hipchat.format(11), 'Alex')
r.set(hipchat.format(12), 'Michael')
r.set(hipchat.format(13), 'Reggie')
r.set(hipchat.format(14), 'Legacy Score')
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
# Regression test old score keys will still appear in lifetime loserboard
r.set("user_score:{0}".format(14), 5)
bot.get_year_month = fake_get_year_month
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
def test_when_value_not_included_default_to_200(self):
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.value, 200)
def test_when_answer_includes_html_answer_is_sanitized(self):
# example answer: <i>Let\\'s Make a Deal</i>
self.trebek_bot.fetch_random_clue = fake_fetch_random_clue
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.answer, "Let's Make a Deal")
def test_when_response_doesNot_begin_with_question_return_none(self):
response = "some test response"
assert self.trebek_bot.response_is_a_question(response) == None
def test_when_response_is_question_return_true(self):
response = "what is some test response"
assert self.trebek_bot.response_is_a_question(response)
def test_fuzzy_matching_of_answer(self):
test_clue = fake_fetch_random_clue()
self.assertFalse(self.trebek_bot.is_correct_answer("polygamist", "polyamourus"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is let's make a deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Lets Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Dela"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Mae a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is elt's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer("a ukulele", "a ukelele"))
self.assertTrue(self.trebek_bot.is_correct_answer("Scrabble", "Scrablle"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Aristotle) Onassis", "Onassis"))
self.assertTrue(self.trebek_bot.is_correct_answer("(William) Blake", "blake"))
self.assertTrue(self.trebek_bot.is_correct_answer("wings (or feathers)", "feathers"))
self.assertTrue(self.trebek_bot.is_correct_answer("A.D. (Anno Domini)", "AD"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Little Orphan) Annie", "annie"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "turtle"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "tortoise"))
# self.assertTrue(self.trebek_bot.is_correct_answer("ben affleck and matt damon", "<NAME> & <NAME>"))
def test_given_json_dictionary_hipchat_object_is_parsed(self):
with open ('test-room-message.json') as data:
d = json.load(data)
t = entities.HipChatRoomMessage(**d)
self.assertEqual(t.item.message.message, "jeopardy")
self.assertEqual(t.item.message.user_from.name, "<NAME>")
def test_message_object_trims_leading_slash_command(self):
p = {}
p['from'] = { 'id':None, 'links': None, 'mention_name':None, 'name': None, 'version': None}
p['message'] = '/trebek jeopardy me'
msg = entities.HipChatMessage(p)
self.assertEqual(msg.message, "jeopardy me")
def test_when_get_response_message_is_called_user_name_is_saved(self):
self.trebek_bot.get_response_message()
key = trebek.Trebek.hipchat_user_key.format('582174')
self.assertTrue(self.trebek_bot.redis.exists(key))
user_name = self.trebek_bot.redis.get(trebek.Trebek.hipchat_user_key.format('582174')).decode()
self.assertEqual("<NAME>", user_name)
def test_number_is_formatted_as_currency(self):
currency = self.trebek_bot.format_currency("100")
self.assertEqual("$100", currency)
currency = self.trebek_bot.format_currency("1000")
self.assertEqual("$1,000", currency)
currency = self.trebek_bot.format_currency("1000000000")
self.assertEqual("$1,000,000,000", currency)
currency = self.trebek_bot.format_currency("-100")
self.assertEqual("<span style='color: red;'>-$100</span>", currency)
currency = self.trebek_bot.format_currency("-1000000000")
self.assertEqual("<span style='color: red;'>-$1,000,000,000</span>", currency)
def test_user_requests_score_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek score"
bot = self.create_bot_with_dictionary(d)
key = "{0}:{1}".format(bot.user_score_prefix,
bot.room_message.item.message.user_from.id)
bot.redis.set(key, 500)
response = bot.get_response_message()
self.assertEqual("$500", response)
def test_user_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Leaderboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Arian: $5,430</li>"
expected += "<li><NAME>: $500</li>"
expected += "<li>Zach: $412</li>"
expected += "<li>Alex: $225</li>"
expected += "<li>Richard: $200</li></ol>"
self.assertEqual(expected, response)
def test_user_loserboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Loserboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Allen: $20</li>"
expected += "<li>Mark: $30</li>"
expected += "<li>Melvin: $50</li>"
expected += "<li>Cordarrell: $70</li>"
expected += "<li>Reggie: $87</li></ol>"
self.assertEqual(expected, response)
def test_jeopardy_round_can_start_from_nothing(self):
response = self.trebek_bot.get_response_message()
expected = "The category is <b>CLASSIC GAME SHOW TAGLINES</b> for $200: "
expected += "<b>\"CAVEAT EMPTOR. LET THE BUYER BEWARE\"</b> (Air Date: 18-Oct-2001)"
self.assertEqual(expected, response)
def test_user_cannot_answer_same_question_twice(self):
# Arrange
clue = self.trebek_bot.get_jeopardy_clue()
d = self.get_setup_json()
user_answer_key = trebek.Trebek.user_answer_key.format(
self.trebek_bot.room_id, clue.id, d['item']['message']['from']['id'])
self.trebek_bot.redis.set(user_answer_key, 'true')
self.trebek_bot.get_question()
d['item']['message']['message'] = '/trebek this is an answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = self.trebek_bot.redis
# Act
response = bot.get_response_message()
# Assert
self.assertEqual("You have already answered <NAME>. Let someone else respond.", response)
def test_given_incorrect_answer_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = '/trebek some test answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is incorrect, <NAME>. Your score is now {0}".format(score_string), response)
def test_given_correct_answer_user_score_increased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertEqual("$200", bot.format_currency(score))
self.assertEqual("That is correct, <NAME>. Your score is now $200 (Expected Answer: Let's Make a Deal)", response)
def test_given_correct_answer_nonQuestion_form_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is correct <NAME>, however responses should be in the form of a question. Your score is now {0}".format(score_string), response)
def test_given_incorrect_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek foobar"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "Time is up! The correct answer was: <b>Let's Make a Deal</b>")
def test_given_correct_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "That is correct James A, however time is up. (Expected Answer: Let's Make a Deal)")
def test_when_asked_for_answer_bot_responds_with_answer(self):
d = self.get_setup_json()
bot = self.create_bot_with_dictionary(d)
bot.get_question()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
response = bot.get_response_message()
self.assertEqual("The answer was: Let's Make a Deal", response)
def test_when_no_question_exists_answer_returns_no_active_clue(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
bot.redis.flushdb()
response = bot.get_response_message()
self.assertEqual("No active clue. Type '/trebek jeopardy' to start a round", response)
def test_when_answer_contains_HTML_word_is_filtered(self):
# e.g.: ANSWER: the <i>Stegosaurus</i>
c = {'id':1, 'title': 'foo', 'created_at': 'bar', 'updated_at': 'foobar', 'clues_count':1}
q = entities.Question(1, answer= "the <i>Stegosaurus</i>", category = c)
self.assertEqual("the Stegosaurus", q.answer)
# e.g.: ANSWER: <i>the Seagull</i>
q = entities.Question(1, answer= "<i>the Seagull</i>", category = c)
self.assertEqual("the Seagull", q.answer)
q = entities.Question(1, answer= "Theodore Roosevelt", category = c)
self.assertEqual("Theodore Roosevelt", q.answer)
def test_when_fetched_clue_is_invalid_get_new_clue(self):
global _invalid_clue, _fetch_count
_fetch_count = 0
clue = get_clue_json()
clue['invalid_count'] = 1
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertEqual(clue.invalid_count, None)
def test_when_fetched_clue_is_missing_question_get_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = ""
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertNotEqual(clue.question.strip(), "")
def test_when_fetched_clue_contains_visual_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the picture seen here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("seen here" in clue.question)
def test_when_fetched_clue_contains_audio_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the audio heard here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("heard here" in clue.question)
def test_when_new_month_arrives_score_resets_to_zero(self):
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.assertEqual("$0", self.trebek_bot.get_user_score())
def test_lifetimescore_includes_multiple_months(self):
# Seed other user's data (to reproduce bug)
self.create_user_scores()
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.trebek_bot.update_score(200)
self.assertEqual("$400", self.trebek_bot.get_user_score(True))
def test_user_lifetime_loserboard_value_includes_multiple_months(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the lifetime loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Legacy Score: $5</li>"
expected += "<li>Allen: $40</li>"
expected += "<li>Mark: $60</li>"
expected += "<li>Melvin: $100</li>"
expected += "<li>Cordarrell: $140</li></ol>"
self.assertEqual(expected, response)
def test_user_lifetime_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek lifetime leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Arian: $10,860</li>"
expected += "<li><NAME>: $1,000</li>"
expected += "<li>Zach: $824</li>"
expected += "<li>Alex: $450</li>"
expected += "<li>Richard: $400</li></ol>"
self.assertEqual(expected, response)
def main():
unittest.main()
if __name__ == '__main__':
main()
``` |
{
"source": "jimfulton/sphinxautoindex",
"score": 2
} |
#### File: lassodomain/sphinxcontrib/lassodomain.py
```python
from docutils import nodes
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.domains.python import _pseudo_parse_arglist
from sphinx.locale import l_, _
from sphinx.roles import XRefRole
from sphinx.util.compat import Directive
from sphinx.util.docfields import Field, TypedField
from sphinx.util.nodes import make_refnode
class SingleGroupedField(Field):
"""A doc field that is grouped; i.e., all fields of that type will be
transformed into one field with its body being a comma-separated line. It
does not have an argument. Each item can be linked using the given
*bodyrolename*. SingleGroupedField should be used for doc fields that can
occur more than once, but don't require a description. If *can_collapse* is
true, this field will revert to a Field if only used once.
Example::
:import: trait_first
:import: trait_queriable
"""
is_grouped = True
def __init__(self, name, names=(), label=None, bodyrolename=None,
can_collapse=False):
Field.__init__(self, name, names, label, False, None, bodyrolename)
self.can_collapse = can_collapse
def make_field(self, types, domain, items):
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
return Field.make_field(self, types, domain, items[0])
bodynode = nodes.paragraph()
for i, (fieldarg, content) in enumerate(items):
bodynode += nodes.Text(', ') if i else None
bodynode += self.make_xref(self.bodyrolename, domain,
content[0].astext(), nodes.Text)
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
class SingleTypedField(SingleGroupedField):
"""A doc field that occurs once and can contain type information. It does
not have an argument. The type can be linked using the given
*typerolename*. Used in this domain to describe return values and types,
which are specified with :return: and :rtype: and are combined into a single
field list item.
Example::
:return: description of the return value
:rtype: optional description of the return type
"""
is_typed = True
def __init__(self, name, names=(), typenames=(), label=None,
typerolename=None, can_collapse=False):
SingleGroupedField.__init__(self, name, names, label, None, can_collapse)
self.typenames = typenames
self.typerolename = typerolename
def make_field(self, types, domain, items):
def handle_item(fieldarg, content):
par = nodes.paragraph()
if fieldarg in types:
par += nodes.Text(' (')
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = ''.join(n.astext() for n in fieldtype)
par += self.make_xref(self.typerolename, domain, typename)
else:
par += fieldtype
par += nodes.Text(') ')
par += content
return par
fieldname = nodes.field_name('', self.label)
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
class LSObject(ObjectDescription):
"""Description of a Lasso object.
"""
doc_field_types = [
# :param name: description
# :ptype name: typename (optional)
# - or -
# :param typename name: description
TypedField('parameter', names=('param', 'parameter'),
typenames=('ptype', 'paramtype', 'type'),
label=l_('Parameters'), typerolename='type', can_collapse=True),
# :return: description
# :rtype: typename (optional)
SingleTypedField('return', names=('return', 'returns'),
typenames=('rtype', 'returntype'),
label=l_('Returns'), typerolename='type', can_collapse=True),
# :author: name <email>
SingleGroupedField('author', names=('author', 'authors'),
label=l_('Author'), can_collapse=True),
# :see: resource
Field('seealso', names=('see', 'url'),
label=l_('See also'), has_arg=False),
# :parent: typename
Field('parent', names=('parent', 'super'),
label=l_('Parent type'), has_arg=False, bodyrolename='type'),
# :import: trait_name
SingleGroupedField('import', names=('import', 'imports'),
label=l_('Imports'), bodyrolename='trait', can_collapse=True),
]
def needs_arglist(self):
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def get_signature_prefix(self, sig):
"""May return a prefix to put before the object name in the signature.
"""
return ''
def get_index_text(self, objectname, name_obj):
"""Return the text for the index entry of the object.
"""
raise NotImplementedError('must be implemented in subclasses')
def handle_signature(self, sig, signode):
"""Transform a Lasso signature into RST nodes.
"""
sig = sig.strip().replace(' ', ' ').replace(' ::', '::').replace(':: ', '::')
if '(' in sig:
if ')::' in sig:
sig, returntype = sig.rsplit('::', 1)
else:
returntype = None
prefix, arglist = sig.split('(', 1)
prefix = prefix.strip()
arglist = arglist[:-1].strip().replace(' =', '=').replace('= ', '=')
else:
if '::' in sig:
sig, returntype = sig.rsplit('::', 1)
else:
returntype = None
prefix = sig
arglist = None
if '->' in prefix:
objectprefix, name = prefix.rsplit('->', 1)
objectprefix += '->'
else:
objectprefix = None
name = prefix
objectname = self.env.ref_context.get('ls:object')
if objectprefix:
fullname = objectprefix + name
elif objectname:
fullname = objectname + '->' + name
else:
objectname = ''
fullname = name
signode['object'] = objectname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
if objectprefix:
signode += addnodes.desc_addname(objectprefix, objectprefix)
signode += addnodes.desc_name(name, name)
if self.needs_arglist():
if arglist:
_pseudo_parse_arglist(signode, arglist)
else:
signode += addnodes.desc_parameterlist()
if returntype:
signode += addnodes.desc_returns(returntype, returntype)
return fullname, objectprefix
def add_target_and_index(self, name_obj, sig, signode):
refname = name_obj[0].lower()
if refname not in self.state.document.ids:
signode['names'].append(name_obj[0])
signode['ids'].append(refname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['ls']['objects']
if refname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % refname +
'other instance in ' +
self.env.doc2path(objects[refname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[refname] = self.env.docname, self.objtype
objectname = self.env.ref_context.get('ls:object')
indextext = self.get_index_text(objectname, name_obj)
if indextext:
self.indexnode['entries'].append(('single', indextext, refname, ''))
def before_content(self):
# needed for automatic qualification of members (reset in subclasses)
self.objname_set = False
def after_content(self):
if self.objname_set:
self.env.ref_context['ls:object'] = None
class LSDefinition(LSObject):
"""Description of an object definition (type, trait, thread).
"""
def get_signature_prefix(self, sig):
return self.objtype + ' '
def get_index_text(self, objectname, name_obj):
return _('%s (%s)') % (name_obj[0], self.objtype)
def before_content(self):
LSObject.before_content(self)
if self.names:
self.env.ref_context['ls:object'] = self.names[0][0]
self.objname_set = True
class LSTag(LSObject):
"""Description of an object with a signature (method, member).
"""
def needs_arglist(self):
return True
def get_index_text(self, objectname, name_obj):
name = name_obj[0].split('->')[-1]
if objectname or name_obj[1]:
objectname = name_obj[0].split('->')[0]
return _('%s() (%s member)') % (name, objectname)
else:
return _('%s() (method)') % name
class LSTraitTag(LSTag):
"""Description of a tag within a trait (require, provide).
"""
def get_signature_prefix(self, sig):
return self.objtype + ' '
def get_index_text(self, objectname, name_obj):
name = name_obj[0].split('->')[-1]
return _('%s() (%s %s)') % (name, objectname, self.objtype)
class LSXRefRole(XRefRole):
"""Provides cross reference links for Lasso objects.
"""
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode['ls:object'] = env.ref_context.get('ls:object')
if not has_explicit_title:
title = title.lstrip('->')
target = target.lstrip('~')
if title[0:1] == '~':
title = title[1:]
arrow = title.rfind('->')
if arrow != -1:
title = title[arrow+2:]
if target[0:2] == '->':
target = target[2:]
refnode['refspecific'] = True
if '(' in target:
target = target.partition('(')[0]
if title.endswith('()'):
title = title[:-2]
return title, target
class LassoDomain(Domain):
"""Lasso language domain.
"""
name = 'ls'
label = 'Lasso'
object_types = {
'method': ObjType(l_('method'), 'meth'),
'member': ObjType(l_('member'), 'meth'),
'provide': ObjType(l_('provide'), 'meth'),
'require': ObjType(l_('require'), 'meth'),
'type': ObjType(l_('type'), 'type'),
'trait': ObjType(l_('trait'), 'trait'),
'thread': ObjType(l_('thread'), 'thread'),
}
directives = {
'method': LSTag,
'member': LSTag,
'provide': LSTraitTag,
'require': LSTraitTag, # name and signature only
'type': LSDefinition,
'trait': LSDefinition,
'thread': LSDefinition,
}
roles = {
'meth': LSXRefRole(fix_parens=True),
'type': LSXRefRole(),
'trait': LSXRefRole(),
'thread': LSXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
}
def clear_doc(self, docname):
for fullname, (fn, _) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
def find_obj(self, env, obj, name, typ, searchorder=0):
if name[-2:] == '()':
name = name[:-2]
objects = self.data['objects']
newname = None
if searchorder == 1:
if obj and obj + '->' + name in objects:
newname = obj + '->' + name
else:
newname = name
else:
if name in objects:
newname = name
elif obj and obj + '->' + name in objects:
newname = obj + '->' + name
return newname, objects.get(newname)
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
objectname = node.get('ls:object')
searchorder = node.hasattr('refspecific') and 1 or 0
name, obj = self.find_obj(env, objectname, target.lower(), typ,
searchorder)
if not obj:
return None
return make_refnode(builder, fromdocname, obj[0], name, contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target, node,
contnode):
objectname = node.get('ls:object')
name, obj = self.find_obj(env, objectname, target.lower(), None, 1)
if not obj:
return []
return [('ls:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0],
name, contnode, name))]
def get_objects(self):
for refname, (docname, type) in self.data['objects'].items():
yield (refname, refname, type, docname, refname, 1)
def setup(app):
app.add_domain(LassoDomain)
``` |
{
"source": "jim-fun/evidently",
"score": 3
} |
#### File: evidently/utils/test_utils.py
```python
import unittest
import numpy as np
from evidently.utils import NumpyEncoder
class TestNumpyEncoder(unittest.TestCase):
def setUp(self) -> None:
self.encoder = NumpyEncoder()
def test_int_convert(self):
for _type in (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64):
self.assertEqual(self.encoder.default(_type(55)), 55)
def test_float_convert(self):
np.testing.assert_almost_equal(self.encoder.default(np.float16(1.5)), 1.5)
for _type in (np.float, np.float_, np.float32, np.float64):
np.testing.assert_almost_equal(self.encoder.default(_type(.2)), .2)
def test_bool_covert(self):
self.assertEqual(self.encoder.default(np.bool(1)), True)
self.assertEqual(self.encoder.default(np.bool(0)), False)
self.assertEqual(self.encoder.default(np.bool_(1)), True)
self.assertEqual(self.encoder.default(np.bool_(0)), False)
def test_array_covert(self):
self.assertEqual(self.encoder.default(np.array([0, 1, 2.1])), [0, 1, 2.1])
self.assertEqual(self.encoder.default(np.empty((0, 0))), [])
self.assertEqual(self.encoder.default(
np.array([[0, 1, 2.1], [0, 1, 2.1], [0, 1, 2.1]])),
[[0, 1, 2.1], [0, 1, 2.1], [0, 1, 2.1]])
self.assertEqual(self.encoder.default(np.ones((2, 3))), [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
def test_none_covert(self):
self.assertIsNone(self.encoder.default(np.void(3)))
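# Hedged usage sketch (assumes NumpyEncoder subclasses json.JSONEncoder, as the default()
# overrides exercised above suggest); the call and output shown are illustrative only:
#   json.dumps({"values": np.arange(3)}, cls=NumpyEncoder) -> '{"values": [0, 1, 2]}'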
``` |
{
"source": "jimga150/HealthNet",
"score": 3
} |
#### File: HealthNet/appointments/tests.py
```python
from django.contrib.auth.models import User
from django.test import TestCase
from .models import *
from core.models import Patient, Doctor, Hospital
class AppointmentTests(TestCase):
"""
Tests for appointments
"""
def test_create_appointment(self):
"""
Checks if appointment can be created
:return: True if the appointment is created
"""
date = 10 / 12 / 20
pat = Patient(User, date, "Male", "AB-", 10, 10, None, None, None, None)
doc = Doctor(User, "634-1242")
hosp = Hospital(name = 'Hospital 1')
app = Appointment(patient=pat, doctor=doc, hospital=hosp, appointmentStart='1800-01-01 08:00:00',
appointmentNotes='Note!')
print(app.hospital.name)
self.assertNotEqual(app.hospital.name, 'Hospital 2')
```
#### File: HealthNet/appointments/views.py
```python
import datetime
from core.models import Log
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.urlresolvers import reverse_lazy
from django.db.models import Q
from django.http import HttpResponseForbidden
from django.shortcuts import render
from django.utils import timezone
from django.views.generic import DeleteView
from django.views.generic import UpdateView
from core.views import is_doctor, is_patient, is_admin
from .forms import AppointmentForm
from .models import Appointment
def is_doc_or_patient(user):
"""
Helper function that checks if a user is a doctor or a patient
:param user: The user to be checked
:return: True if user is a doctor or a patient
"""
return is_doctor(user) or is_patient(user)
def is_not_admin(user):
"""
Helper function that checks whether a user is not an admin
:param user: The user to be checked
:return: True if the user is not an admin
"""
return not is_admin(user)
@login_required
@user_passes_test(is_not_admin)
def appointment_main(request):
"""
Appointment_main renders the main appointment page. It detects the user type (admin, patient, nurse, etc)
and shows them the appropriate options
:param request: The request with user information
:return: The page to be rendered
"""
parent = "core/landing/baselanding.html"
if request.user.groups.filter(name='Patient').exists():
appointments = Appointment.objects.filter(patient=request.user.patient).order_by('appointmentStart')
return render(request, 'appointments/patientappointment.html', {'appointments': appointments, 'parent': parent})
elif request.user.groups.filter(name='Doctor').exists():
appointments = Appointment.objects.filter(doctor=request.user.doctor)
return render(request, 'appointments/patientappointment.html', {'appointments': appointments, 'parent': parent})
elif request.user.groups.filter(name='Nurse').exists():
date = datetime.date.today()
start_week = date - datetime.timedelta(date.weekday())
end_week = start_week + datetime.timedelta(7)
appointments = Appointment.objects.filter(hospital=request.user.nurse.hospital,
appointmentStart__range=[start_week, end_week])
return render(request, 'appointments/patientappointment.html', {'appointments': appointments, 'parent': parent})
else:
return HttpResponseForbidden()
@login_required
@user_passes_test(is_not_admin)
def CreateAppointment(request):
"""
View to create an appointment. It ensures that the appointment is valid, and that there is not an appointment
already at the time with the designated doctor
:param request: The request with user information
:return: The page to be rendered
"""
created = False
already_exists = False
parent = get_parent(request)
if request.method == 'POST':
appointment_form = AppointmentForm(data=request.POST)
if appointment_form.is_valid():
appointment = appointment_form.save(commit=False)
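# Refuse to double-book: bail out if this doctor already has an appointment
# starting at the same time.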
if Appointment.objects.filter(
Q(appointmentStart=appointment.appointmentStart) & Q(doctor=appointment.doctor)).exists():
already_exists = True
else:
appointment_form.save()
# Register Log
log = Log.objects.create_Log(request.user, request.user.username, timezone.now(),
"Appointment Created by " + request.user.username)
log.save()
created = True
else:
print("Error")
print(appointment_form.errors)
else:
appointment_form = AppointmentForm()
return render(request, "appointments/patientappointmentform.html",
{'appointment_form': appointment_form, 'registered': created, 'already_exists': already_exists,
'parent': parent})
class EditAppointment(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
"""
EditAppointment extends UpdateView, which is the generic class for editing preexisting objects
This allows for a user to change their appointments
"""
model = Appointment
template_name = 'appointments/appointmentedit.html'
form_class = AppointmentForm
success_url = reverse_lazy('appointment_home')
def test_func(self):
return is_not_admin(self.request.user)
class DeleteAppointment(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
"""
DeleteAppointment extends DeleteView, which is the generic class for deleting objects
DeleteAppointment will delete the appointment, and is only visible to doctors and patients
"""
model = Appointment
success_url = reverse_lazy('appointment_home')
template_name = 'appointments/appointmentdelete.html'
def test_func(self):
return is_not_admin(self.request.user)
def get_parent(request):
"""
A helper method that returns the appropriate parent for the designated user type
:param request: The request with user information
:return: The parent that a template will extend
"""
parent = 'core/landing/Patient.html'
if request.user.groups.filter(name='Doctor').exists():
parent = 'core/landing/Doctor.html'
elif request.user.groups.filter(name='Nurse').exists():
parent = 'core/landing/Nurse.html'
elif request.user.groups.filter(name='Admin').exists():
parent = 'core/landing/Admin.html'
return parent
```
#### File: calendarium/tests/tests.py
```python
import json
from django.forms.models import model_to_dict
from django.test import TestCase
from django.utils.timezone import timedelta
from mixer.backend.django import mixer
from ..constants import FREQUENCIES, OCCURRENCE_DECISIONS
from ..forms import OccurrenceForm
from ..utils import now
from django.template.defaultfilters import slugify
from ..models import Event, EventCategory, Occurrence, Rule
from django.template import Context, Template
from django.utils import timezone
from ..templatetags.calendarium_tags import get_upcoming_events, get_week_URL
from django_libs.tests.mixins import ViewRequestFactoryTestMixin
from .. import views
class OccurrenceFormTestCase(TestCase):
"""Test for the ``OccurrenceForm`` form class."""
longMessage = True
def setUp(self):
# single
self.event = mixer.blend('calendarium.Event', rule=None,
end_recurring_period=None)
self.event_occurrence = next(self.event.get_occurrences(
self.event.start))
def test_form(self):
"""Test if ``OccurrenceForm`` is valid and saves correctly."""
# Test for event
data = model_to_dict(self.event_occurrence)
initial = data.copy()
data.update({
'decision': OCCURRENCE_DECISIONS['all'],
'created_by': None,
'category': None,
'title': 'changed'})
form = OccurrenceForm(data=data, initial=initial)
self.assertTrue(form.is_valid(), msg=(
'The OccurrenceForm should be valid'))
form.save()
event = Event.objects.get(pk=self.event.pk)
self.assertEqual(event.title, 'changed', msg=(
'When save is called, the event\'s title should be "changed".'))
class EventModelManagerTestCase(TestCase):
"""Tests for the ``EventModelManager`` custom manager."""
longMessage = True
def setUp(self):
# event that only occurs once
self.event = mixer.blend('calendarium.Event', rule=None, start=now(),
end=now() + timedelta(hours=1))
# event that occurs for one week daily with one custom occurrence
self.event_daily = mixer.blend('calendarium.Event')
self.occurrence = mixer.blend(
'calendarium.Occurrence', event=self.event, original_start=now(),
original_end=now() + timedelta(days=1), title='foo_occurrence')
class EventTestCase(TestCase):
"""Tests for the ``Event`` model."""
longMessage = True
def setUp(self):
self.not_found_event = mixer.blend(
'calendarium.Event', start=now() - timedelta(hours=24),
end=now() - timedelta(hours=24),
creation_date=now() - timedelta(hours=24), rule=None)
self.event = mixer.blend(
'calendarium.Event', start=now(), end=now(),
creation_date=now())
# category=mixer.blend('calendarium.EventCategory'))
self.event_wp = mixer.blend(
'calendarium.Event', start=now(), end=now(),
creation_date=now(),
)
self.occurrence = mixer.blend(
'calendarium.Occurrence', original_start=now(),
original_end=now() + timedelta(days=1), event=self.event,
title='foo_occurrence')
self.single_time_event = mixer.blend('calendarium.Event', rule=None)
def test_get_title(self):
"""Test for ``__str__`` method."""
title = "The Title"
event = mixer.blend(
'calendarium.Event', start=now(), end=now(),
creation_date=now(),
title=title)
self.assertEqual(title, str(event), msg=(
'Method ``__str__`` did not output event title.'))
def test_get_absolute_url(self):
"""Test for ``get_absolute_url`` method."""
event = mixer.blend(
'calendarium.Event', start=now(), end=now(),
creation_date=now())
event.save()
self.assertTrue(str(event.pk) in str(event.get_absolute_url()), msg=(
'Method ``get_absolute_url`` did not contain event id.'))
def test_create_occurrence(self):
"""Test for ``_create_occurrence`` method."""
occurrence = self.event._create_occurrence(now())
self.assertEqual(type(occurrence), Occurrence, msg=(
'Method ``_create_occurrence`` did not output the right type.'))
class EventCategoryTestCase(TestCase):
"""Tests for the ``EventCategory`` model."""
longMessage = True
def test_instantiation(self):
"""Test for instantiation of the ``EventCategory`` model."""
event_category = EventCategory()
self.assertTrue(event_category)
def test_get_name(self):
"""Test for ``__str__`` method."""
name = "The Name"
event_category = EventCategory(name=name)
self.assertEqual(name, str(event_category), msg=(
'Method ``__str__`` did not output event category name.'))
def test_get_slug(self):
"""Test slug in ``save`` method."""
name = "The Name"
event_category = EventCategory(name=name)
event_category.save()
self.assertEqual(slugify(name), str(event_category.slug), msg=(
'Method ``save`` did not set event category slug as expected.'))
class EventRelationTestCase(TestCase):
"""Tests for the ``EventRelation`` model."""
longMessage = True
def test_instantiation(self):
"""Test for instantiation of the ``EventRelation`` model."""
event_relation = mixer.blend('calendarium.EventRelation')
self.assertTrue(event_relation)
class OccurrenceTestCase(TestCase):
"""Tests for the ``Occurrence`` model."""
longMessage = True
def test_instantiation(self):
"""Test for instantiation of the ``Occurrence`` model."""
occurrence = Occurrence()
self.assertTrue(occurrence)
def test_delete_period(self):
"""Test for the ``delete_period`` function."""
occurrence = mixer.blend('calendarium.Occurrence')
occurrence.delete_period('all')
self.assertEqual(Occurrence.objects.all().count(), 0, msg=(
'Should delete only the first occurrence.'))
event = mixer.blend(
'calendarium.Event', start=now() - timedelta(hours=0),
end=now() - timedelta(hours=0))
occurrence = mixer.blend(
'calendarium.Occurrence', event=event,
start=now() - timedelta(hours=0), end=now() - timedelta(hours=0))
occurrence.delete_period('this one')
self.assertEqual(Occurrence.objects.all().count(), 0, msg=(
'Should delete only the first occurrence.'))
event = mixer.blend(
'calendarium.Event', start=now() - timedelta(hours=0),
end=now() - timedelta(hours=0))
event.save()
occurrence = mixer.blend(
'calendarium.Occurrence', event=event,
start=now() - timedelta(hours=0), end=now() - timedelta(hours=0))
occurrence.delete_period('following')
self.assertEqual(Event.objects.all().count(), 0, msg=(
'Should delete the event and the occurrence.'))
occurrence_1 = mixer.blend(
'calendarium.Occurrence', start=now(),
end=now() + timedelta(days=1),
original_start=now() + timedelta(hours=1))
occurrence_2 = mixer.blend(
'calendarium.Occurrence', start=now(),
end=now() + timedelta(days=1),
original_start=now() + timedelta(hours=1))
occurrence_2.event = occurrence_1.event
occurrence_2.save()
occurrence_2.delete_period('this one')
occurrence_3 = mixer.blend(
'calendarium.Occurrence', start=now(),
end=now() + timedelta(days=1),
original_start=now() + timedelta(hours=1))
occurrence_3.event = occurrence_1.event
occurrence_3.save()
occurrence_4 = mixer.blend(
'calendarium.Occurrence', start=now(),
end=now() + timedelta(days=1),
original_start=now() + timedelta(hours=1))
occurrence_4.event = occurrence_1.event
occurrence_4.save()
occurrence_3.delete_period('this one')
occurrence_1.delete_period('following')
self.assertEqual(Occurrence.objects.all().count(), 0, msg=(
'Should delete all occurrences with this start date.'))
class RuleTestCase(TestCase):
"""Tests for the ``Rule`` model."""
longMessage = True
def test_instantiation(self):
"""Test for instantiation of the ``Rule`` model."""
rule = Rule()
self.assertTrue(rule)
class RenderUpcomingEventsTestCase(TestCase):
"""Tests for the ``render_upcoming_events`` tag."""
longMessage = True
def setUp(self):
self.occurrence = mixer.blend(
'calendarium.Occurrence',
start=timezone.now() + timezone.timedelta(days=1),
end=timezone.now() + timezone.timedelta(days=2),
original_start=timezone.now() + timezone.timedelta(seconds=20),
event__start=timezone.now() + timezone.timedelta(days=1),
event__end=timezone.now() + timezone.timedelta(days=2),
event__title='foo',
)
def test_render_tag(self):
t = Template('{% load calendarium_tags %}{% render_upcoming_events %}')
self.assertIn('foo', t.render(Context()))
class GetUpcomingEventsTestCase(TestCase):
"""Tests for the ``get_upcoming_events`` tag."""
longMessage = True
def setUp(self):
self.occurrence = mixer.blend(
'calendarium.Occurrence',
start=timezone.now() + timezone.timedelta(days=1),
end=timezone.now() + timezone.timedelta(days=2),
original_start=timezone.now() + timezone.timedelta(seconds=20),
event__start=timezone.now() + timezone.timedelta(days=1),
event__end=timezone.now() + timezone.timedelta(days=2),
)
def test_tag(self):
result = get_upcoming_events()
self.assertEqual(len(result), 1)
class GetWeekURLTestCase(TestCase):
"""Tests for the ``get_week_URL`` tag."""
longMessage = True
def test_tag(self):
result = get_week_URL(
timezone.datetime.strptime('2016-02-07', '%Y-%m-%d'))
self.assertEqual(result, u'/calendar/2016/week/5/')
class CalendariumRedirectViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``CalendariumRedirectView`` view."""
view_class = views.CalendariumRedirectView
def test_view(self):
resp = self.client.get(self.get_url())
self.assertEqual(resp.status_code, 200)
class MonthViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``MonthView`` view class."""
view_class = views.MonthView
def get_view_kwargs(self):
return {'year': self.year, 'month': self.month}
def setUp(self):
self.year = now().year
self.month = now().month
def test_view(self):
"""Test for the ``MonthView`` view class."""
# regular call
resp = self.is_callable()
self.assertEqual(
resp.template_name[0], 'calendarium/calendar_month.html', msg=(
'Returned the wrong template.'))
self.is_postable(data={'next': True}, to_url_name='calendar_month')
self.is_postable(data={'previous': True}, to_url_name='calendar_month')
self.is_postable(data={'today': True}, to_url_name='calendar_month')
        # called with an invalid category pk
self.is_callable(data={'category': 'abc'})
        # called with a non-existent category pk
self.is_callable(data={'category': '999'})
# called with a category pk
category = mixer.blend('calendarium.EventCategory')
self.is_callable(data={'category': category.pk})
# called with wrong values
self.is_not_callable(kwargs={'year': 2000, 'month': 15})
class WeekViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``WeekView`` view class."""
view_class = views.WeekView
def get_view_kwargs(self):
return {'year': self.year, 'week': self.week}
def setUp(self):
self.year = now().year
# current week number
self.week = now().date().isocalendar()[1]
def test_view(self):
"""Tests for the ``WeekView`` view class."""
resp = self.is_callable()
self.assertEqual(
resp.template_name[0], 'calendarium/calendar_week.html', msg=(
'Returned the wrong template.'))
self.is_postable(data={'next': True}, to_url_name='calendar_week')
self.is_postable(data={'previous': True}, to_url_name='calendar_week')
self.is_postable(data={'today': True}, to_url_name='calendar_week')
resp = self.is_callable(ajax=True)
self.assertEqual(
resp.template_name[0], 'calendarium/partials/calendar_week.html',
msg=('Returned the wrong template for AJAX request.'))
self.is_not_callable(kwargs={'year': self.year, 'week': '60'})
class DayViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``DayView`` view class."""
view_class = views.DayView
def get_view_kwargs(self):
return {'year': self.year, 'month': self.month, 'day': self.day}
def setUp(self):
self.year = 2001
self.month = 2
self.day = 15
def test_view(self):
"""Tests for the ``DayView`` view class."""
resp = self.is_callable()
self.assertEqual(
resp.template_name[0], 'calendarium/calendar_day.html', msg=(
'Returned the wrong template.'))
self.is_postable(data={'next': True}, to_url_name='calendar_day')
self.is_postable(data={'previous': True}, to_url_name='calendar_day')
self.is_postable(data={'today': True}, to_url_name='calendar_day')
self.is_not_callable(kwargs={'year': self.year, 'month': '14',
'day': self.day})
class EventUpdateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``EventUpdateView`` view class."""
view_class = views.EventUpdateView
def get_view_kwargs(self):
return {'pk': self.event.pk}
def setUp(self):
self.event = mixer.blend('calendarium.Event')
self.user = mixer.blend('auth.User', is_superuser=True)
def test_view(self):
self.is_callable(user=self.user)
class EventCreateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``EventCreateView`` view class."""
view_class = views.EventCreateView
def setUp(self):
self.user = mixer.blend('auth.User', is_superuser=True)
def test_view(self):
self.is_callable(user=self.user)
self.is_callable(user=self.user, data={'delete': True})
self.assertEqual(Event.objects.all().count(), 0)
class EventDetailViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``EventDetailView`` view class."""
view_class = views.EventDetailView
def get_view_kwargs(self):
return {'pk': self.event.pk}
def setUp(self):
self.event = mixer.blend('calendarium.Event')
def test_view(self):
self.is_callable()
class OccurrenceViewTestCaseMixin(object):
"""Mixin to avoid repeating code for the Occurrence views."""
def get_view_kwargs(self):
return {
'pk': self.event.pk,
'year': self.event.start.date().year,
'month': self.event.start.date().month,
'day': self.event.start.date().day,
}
def setUp(self):
self.rule = mixer.blend('calendarium.Rule', name='daily')
self.event = mixer.blend(
'calendarium.Event', created_by=mixer.blend('auth.User'),
start=now() - timedelta(days=1), end=now() + timedelta(days=5),
rule=self.rule)
class OccurrenceDetailViewTestCase(
OccurrenceViewTestCaseMixin, ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``OccurrenceDetailView`` view class."""
view_class = views.OccurrenceDetailView
class OccurrenceUpdateViewTestCase(
OccurrenceViewTestCaseMixin, ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``OccurrenceUpdateView`` view class."""
view_class = views.OccurrenceUpdateView
class UpcomingEventsAjaxViewTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``UpcomingEventsAjaxView`` view class."""
view_class = views.UpcomingEventsAjaxView
def test_view(self):
self.is_callable()
def test_view_with_count(self):
self.is_callable(data={'count': 5})
def test_view_with_category(self):
cat = mixer.blend('calendarium.EventCategory')
self.is_callable(data={'category': cat.slug})
```
#### File: HealthNet/core/forms.py
```python
from django.utils import timezone
from django import forms
from django.contrib.auth.models import User
from datetimewidget.widgets import DateWidget
from .models import Patient, Hospital, Doctor, Nurse
class UserRegForm(forms.ModelForm):
"""
Form for user registration
"""
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'password', 'email', 'first_name', 'last_name')
def __init__(self, *args, **kwargs):
super(UserRegForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs.update({'class': 'form-control'})
self.fields['password'].widget.attrs.update({'class': 'form-control'})
self.fields['email'].widget.attrs.update({'class': 'form-control'})
self.fields['first_name'].widget.attrs.update({'class': 'form-control'})
self.fields['last_name'].widget.attrs.update({'class': 'form-control'})
def is_valid(self):
return super(UserRegForm, self).is_valid() and \
User.objects.all().filter(email=self.cleaned_data['email']).count() == 0
class UserUpdateForm(forms.ModelForm):
"""
Form for user updates
"""
class Meta:
model = User
fields = ['email', 'first_name', 'last_name']
class PatientRegForm(forms.ModelForm):
"""
Form for patient registration
    Note: Separate from user registration form
"""
now = timezone.now()
birthday = forms.DateField(widget=DateWidget(usel10n=True, bootstrap_version=3))
preferred_hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=False)
# hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True)
emergency_contact = forms.EmailField(label="Emergency Contact Email Address:")
class Meta:
model = Patient
fields = ('birthday', 'sex', 'blood_type', 'height', 'weight', 'allergies', 'medical_history', 'insurance_info',
'emergency_contact', 'preferred_hospital') # , 'hospital')
def __init__(self, *args, **kwargs):
super(PatientRegForm, self).__init__(*args, **kwargs)
self.fields['birthday'].widget.attrs.update({'class': 'form-control'})
self.fields['sex'].widget.attrs.update({'class': 'form-control'})
self.fields['blood_type'].widget.attrs.update({'class': 'form-control'})
self.fields['height'].widget.attrs.update({'class': 'form-control'})
self.fields['weight'].widget.attrs.update({'class': 'form-control'})
self.fields['allergies'].widget.attrs.update({'class': 'form-control'})
self.fields['insurance_info'].widget.attrs.update({'class': 'form-control'})
self.fields['emergency_contact'].widget.attrs.update({'class': 'form-control'})
self.fields['preferred_hospital'].widget.attrs.update({'class': 'form-control'})
self.fields['medical_history'].widget.attrs.update({'class': 'form-control'})
# self.fields['hospital'].widget.attrs.update({'class': 'form-control'})
class NurseRegForm(forms.ModelForm):
"""
Form for Nurse registration
    Note: Separate from user registration form
"""
now = timezone.now()
phoneNum = forms.IntegerField(label="Phone Number")
hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True, label="Hospital")
class Meta:
model = Nurse
fields = ('phoneNum', 'hospital')
class DoctorRegForm(forms.ModelForm):
"""
Form for Doctor registration
    Note: Separate from user registration form
"""
phoneNum = forms.IntegerField(label="Phone Number")
hospital = forms.ModelChoiceField(queryset=Hospital.objects.all(), required=True, label="Hospital")
class Meta:
model = Doctor
fields = ('phoneNum', 'hospital')
class LoginForm(forms.ModelForm):
"""
Form for logging in
"""
class Meta:
model = User
username = forms.CharField(max_length=50)
password = forms.CharField(max_length=50)
fields = ["username", "password"]
class PatientForm(forms.ModelForm):
"""
Form for accessing Patient Data
"""
class Meta:
model = Patient
fields = ['birthday', 'sex', 'height', 'weight', 'allergies', 'medical_history', 'insurance_info',
'emergency_contact', 'preferred_hospital']
class PatientMediForm(forms.ModelForm):
"""
Form for accessing Patient Medical Data
"""
class Meta:
model = Patient
fields = ('sex', 'blood_type', 'height', 'weight', 'allergies', 'medical_history')
class UploadFileForm(forms.Form):
"""
Form for Uploading Files
"""
file = forms.FileField()
class NewHospitalForm(forms.ModelForm):
"""
Form for creating a new Hospital
"""
class Meta:
model = Hospital
fields = ('name',)
```
#### File: HealthNet/core/views.py
```python
from io import TextIOWrapper
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template import RequestContext
from django.views.generic import UpdateView, CreateView, DetailView
from prescriptions.models import Prescription
from .forms import *
from .models import *
def is_patient(user):
"""
Helper function that checks if a user is a patient
:param user: The user to be checked
:return: True if user is a patient
"""
if user:
return user.groups.filter(name='Patient').count() != 0
return False
def is_doctor(user):
"""
Helper function that checks if a user is a doctor
:param user: The user to be checked
:return: True if user is a doctor
"""
if user:
return user.groups.filter(name='Doctor').count() != 0
return False
def is_nurse(user):
"""
Helper function that checks if a user is a nurse
:param user: The user to be checked
:return: True if user is a nurse
"""
if user:
return user.groups.filter(name='Nurse').count() != 0
return False
def is_doctor_or_nurse(user):
"""
Uses above functions combined to fit the @user_passes_test mixin
:param user: The User in question
:return: True if the user is a Doctor or Nurse
"""
return is_doctor(user) or is_nurse(user)
def not_patient(user):
"""
    Uses the is_patient function to test whether the user is of patient type
:param user: The User in question
:return: True if user is not a patient
"""
return not is_patient(user)
def is_admin(user):
"""
Helper function that checks if a user is an admin
:param user: The user to be checked
:return: True if user is an admin
"""
if user:
return user.groups.filter(name='Admin').count() != 0
return False
def user_login(request):
"""
Renders the user login page, and redirects the user to the appropriate landing page
:param request: The request with user information
:return: The page to be rendered
"""
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
# Register Log
log = Log.objects.create_Log(user, user.username, timezone.now(), user.username + " logged in")
log.save()
return HttpResponseRedirect(reverse('landing'))
else:
return HttpResponse("Your Account has been Deactivated")
else:
print("Invalid login: {0}".format(username))
context = RequestContext(request)
context['login_failure'] = True
return render(request, 'core/login.html', context)
else:
return render(request, 'core/login.html', RequestContext(request))
@login_required
def user_logout(request):
"""
Logs out a user, and logs it
:param request: The request with user information
:return: The page to be rendered
"""
# Register Log
log = Log.objects.create_Log(request.user, request.user.username, timezone.now(),
request.user.username + " logged out")
log.save()
logout(request)
return HttpResponseRedirect(reverse('login'))
@login_required
@user_passes_test(is_patient)
def patient_landing(request):
"""
Renders the patient landing page
:param request: The request with user information
:return: The page to be rendered
"""
return render(request, 'core/landing/Patient.html')
@login_required
def profile(request):
"""
Displays the user Profile Information
:param request: The request with user information
:return: The page to be rendered
"""
parent = get_parent(request)
return render(request, 'core/landing/pages/profile.html', {'parent': parent})
def QueryListtoString(query):
"""
Used to convert Query lists to readable strings, used in the following Medical Information Export function.
:param query: the query to convert
:return: the readable string
"""
ans = ""
for q in query.iterator():
ans = ans + str(q) + '\n'
return ans
def MediInfoExport(Patient_exporting: Patient, assoc_user: User, is_email):
"""
Generic getter for a patient's complete medical information into a readable format in a String
:param Patient_exporting: The Patient exporting their info
:param assoc_user: The Patient's associated User
:param is_email: True if this is being sent in an email (adds greeting), false otherwise
:return: The complete text export
"""
Name = 'Name: ' + str(assoc_user.get_full_name())
Email = 'Email: ' + str(assoc_user.email)
Birthday = 'Birthday: ' + str(Patient_exporting.birthday)
Gender = 'Sex: ' + str(dict(Patient_exporting.SEX_CHOICE)[Patient_exporting.sex])
Blood_Type = 'Blood-Type: ' + str(dict(Patient_exporting.BLOOD_TYPE)[Patient_exporting.blood_type])
Height = 'Height: ' + str(Patient_exporting.height)
Weight = 'Weight: ' + str(Patient_exporting.weight) + ' lbs'
Allergies = 'Allergies: \r\n' + str(Patient_exporting.allergies)
Medical_History = 'Medical-History: \r\n' + str(Patient_exporting.medical_history)
Prescriptions = 'Prescriptions: \r\n' + \
str(QueryListtoString(Prescription.objects.all().filter(patient=Patient_exporting)))
Insurance_Info = 'Insurance-Info: ' + str(Patient_exporting.insurance_info)
Preferred_Hospital = 'Preferred-Hospital: ' + str(Patient_exporting.preferred_hospital)
PHospital = 'Current-Hospital: ' + str(Patient_exporting.hospital)
Emergency_Contact = 'Emergency-Contact: ' + str(Patient_exporting.emergency_contact)
ans = Name + '\r\n' + \
Email + '\r\n' + \
Birthday + '\r\n' + \
Gender + '\r\n' + \
Blood_Type + '\r\n' + \
Height + '\r\n' + \
Weight + '\r\n\r\n' + \
Allergies + '\r\n\r\n' + \
Medical_History + '\r\n\r\n' + \
Prescriptions + '\r\n\r\n' + \
Insurance_Info + '\r\n' + \
Preferred_Hospital + '\r\n' + \
PHospital + '\r\n' + \
Emergency_Contact + '\r\n'
if is_email:
return 'Hello ' + str(assoc_user.first_name) + \
', \n\n\tYou are receiving this email as an export of your medical information from ' + \
str(Patient_exporting.hospital) + '. Below you\'ll find the medical record export. ' \
'Thank you for using HealthNet!\n\n' + ans
return ans
@login_required
@user_passes_test(is_patient)
def email(request):
"""
Sends the patient an email with a full summary of their medical information.
:param request: The request with user information
:return: The success landing page
"""
Pat = Patient.objects.all().get(user=request.user)
if request.user.email_user('Medical Information Export: ' + request.user.get_full_name(),
MediInfoExport(Pat, request.user, True),
'<EMAIL>',
fail_silently=True,
):
return render(request, 'core/landing/pages/email_success.html')
else:
return render(request, 'core/landing/pages/profile.html', {'parent': get_parent(request)})
@login_required
@user_passes_test(is_patient)
def download(request):
"""
    Serves the patient's full summary as a downloadable text file.
:param request: The request with user information
:return: Downloadable text file, in lieu of a conventional response
"""
Pat = Patient.objects.all().get(user=request.user)
content = MediInfoExport(Pat, request.user, False)
response = HttpResponse(content, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename="%s_Info.txt"' % \
str(request.user.get_full_name()).replace(' ', '-')
return response
def listtostring(listin):
"""
Converts a simple list into a space separated sentence, effectively reversing str.split(" ")
:param listin: the list to convert
:return: the readable string
"""
ans = ""
for l in listin:
ans = ans + str(l) + " "
return ans.strip()
def read_new_Patient(filename, encoding, doctor_user):
"""
    Reads in a new Patient from the specified file; assumes that the patient instance already exists and is associated
with an existing user, but not necessarily populated.
:param doctor_user: User of Doctor signing off on Patient import, used when constructing Prescriptions
:param filename: Name of the file to read from
:param encoding: UTF-8, ANSI, etc etc
:return: The newly populated Patient class (after its been saved)
"""
# print("reading new patient...")
file = TextIOWrapper(filename.file, encoding=encoding)
new_patient = None
Allergies_mode = False
Prescriptions_mode = False
Medical_History_mode = False
Allergies = ''
Medical_History = ''
Prescriptions = []
for line in file.readlines():
print("Line: " + line)
words = line.strip().split(" ")
print(words)
instance_var = words[0]
# print("Current variable is " + instance_var)
if Allergies_mode:
if line.strip() != '':
# print('found allergy: ' + line.strip())
Allergies = Allergies + line.strip()
else:
# print('And that\'s it for allergies')
Allergies_mode = False
new_patient.allergies = Allergies
elif Medical_History_mode:
if line.strip() != '':
# print('found medical history: ' + line.strip())
Medical_History = Medical_History + line.strip()
else:
# print('And that\'s it for medical history')
Medical_History_mode = False
new_patient.medical_history = Medical_History
elif Prescriptions_mode:
if line.strip() != '':
# print('found prescription: ' + line.strip())
Prescriptions.append(line.strip())
else:
# print('And that\'s it for prescriptions')
Prescriptions_mode = False
for p in Prescriptions:
Prescription.fromString(p, new_patient.id, doctor_user)
if instance_var == 'Email:':
Email = words[1]
print("found email: " + Email)
user = User.objects.get(email=Email)
new_patient = Patient.objects.get(user=user)
print(new_patient)
elif instance_var == 'Birthday:':
print("found b-day: " + words[1])
new_patient.birthday = words[1]
elif instance_var == 'Sex:':
print("found sex: " + words[1])
new_patient.sex = words[1]
elif instance_var == 'Blood-Type:':
print("found b-type: " + words[1])
new_patient.blood_type = words[1]
elif instance_var == 'Height:':
print("found height: " + words[1])
new_patient.height = words[1]
elif instance_var == 'Weight:':
print("found weight: " + words[1])
new_patient.weight = words[1]
elif instance_var == 'Allergies:':
print("found Allergies")
Allergies_mode = True
        elif instance_var == 'Medical-History:':
print("found Medical History")
Medical_History_mode = True
elif instance_var == 'Prescriptions:':
print("found prescriptions")
Prescriptions_mode = True
elif instance_var == 'Insurance-Info:':
insurance = listtostring(words[1:])
print("found Insurance: " + insurance)
new_patient.insurance_info = insurance
elif instance_var == 'Preferred-Hospital:':
p_hospital = listtostring(words[1:])
print("found hospital: " + p_hospital)
new_patient.preferred_hospital = Hospital.objects.get(name=p_hospital)
elif instance_var == 'Emergency-Contact:':
print("found e-contact: " + words[1])
new_patient.emergency_contact = words[1]
# elif instance_var == 'Current-Hospital:':
# c_hospital = listtostring(words[1:])
# print("found hospital: " + c_hospital)
# new_patient.hospital = Hospital.objects.get(name=c_hospital)
return new_patient.save()
@login_required
@user_passes_test(is_doctor_or_nurse)
def upload_patient_info(request):
"""
    View for uploading a text file with patient information
:param request: request with possible file upload
:return: the current page again with possible confirmation
"""
uploaded = False
if request.method == 'POST':
attempted = True
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
read_new_Patient(request.FILES['file'], request.encoding, request.user)
uploaded = True
else:
uploaded = False
else:
attempted = False
form = UploadFileForm()
return render(request, 'core/upload_patient.html', {'form': form, 'attempted': attempted,
'upload_success': uploaded})
@login_required
@user_passes_test(is_nurse)
def nurse_landing(request):
"""
Renders the patient landing page
:param request: The request with user information
:return: The page to be rendered
"""
return render(request, 'core/landing/Nurse.html')
@login_required
@user_passes_test(is_doctor)
def doctor_landing(request):
"""
Renders the doctor landing page
:param request: The request with user information
:return: The page to be rendered
"""
return render(request, 'core/landing/Doctor.html')
@login_required
@user_passes_test(is_admin)
def admin_landing(request):
"""
Renders the admin landing page
:param request: The request with user information
:return: The page to be rendered
"""
return render(request, 'core/landing/Admin.html')
@login_required
@user_passes_test(is_admin)
def registerStaff(request):
"""
Renders the registration selection page
:param request: The request with user information
:return: The page to be rendered
"""
return render(request, 'core/landing/pages/registration_select.html')
@login_required
@user_passes_test(is_admin)
def register_nurse_page(request):
"""
    Registers a nurse as well as its one-to-one user. Error-checks fields, and ensures that all data is valid before
creation
:param request: The request with user information
:return: The page to be rendered
"""
registered = False
if request.method == 'POST':
user_form = UserRegForm(data=request.POST)
other_form = NurseRegForm(data=request.POST)
if user_form.is_valid() and other_form.is_valid():
group = Group.objects.get(name='Nurse')
user = user_form.save()
if User.objects.all().filter(email=user.email).count() > 0:
# throw an error, this became an issue
pass
user.set_password(<PASSWORD>)
user.groups.add(group)
user.save()
nprofile = other_form.save(commit=False)
nprofile.user = user
nprofile.save()
# Register Log
log = Log.objects.create_Log(nprofile.user, nprofile.user.username, timezone.now(), "Nurse Registered")
log.save()
registered = True
# else:
# print("Error")
# print(user_form.errors, other_form.errors)
else:
user_form = UserRegForm()
other_form = NurseRegForm()
return render(request, "core/landing/pages/registrationPages/staff_registration.html",
{'user_form': user_form, 'other_form': other_form, 'registered': registered, 'stafftype': "nurse"})
@login_required
@user_passes_test(is_admin)
def register_doctor_page(request):
"""
    Registers a doctor as well as its one-to-one user. Error-checks fields, and ensures that all data is valid before
creation
:param request: The request with user information
:return: The page to be rendered
"""
registered = False
if request.method == 'POST':
user_form = UserRegForm(data=request.POST)
other_form = DoctorRegForm(data=request.POST)
if user_form.is_valid() and other_form.is_valid():
group = Group.objects.get(name='Doctor')
user = user_form.save()
if User.objects.all().filter(email=user.email).count() > 0:
# throw an error, this became an issue
pass
user.set_password(<PASSWORD>)
user.groups.add(group)
user.save()
dprofile = other_form.save(commit=False)
dprofile.user = user
dprofile.save()
# Register Log
log = Log.objects.create_Log(dprofile.user, dprofile.user.username, timezone.now(), "Doctor Registered")
log.save()
registered = True
# else:
# print("Error")
# print(user_form.errors, other_form.errors)
else:
user_form = UserRegForm()
other_form = DoctorRegForm()
return render(request, "core/landing/pages/registrationPages/staff_registration.html",
{'user_form': user_form, 'other_form': other_form, 'registered': registered, 'stafftype': "doctor"})
@login_required
@user_passes_test(is_admin)
def register_admin_page(request):
"""
    Registers an admin as well as its one-to-one user. Error-checks fields, and ensures that all data is valid before
creation
:param request: The request with user information
:return: The page to be rendered
"""
registered = False
if request.method == 'POST':
user_form = UserRegForm(data=request.POST)
if user_form.is_valid():
group = Group.objects.get(name='Admin')
user = user_form.save()
if User.objects.all().filter(email=user.email).count() > 0:
# throw an error, this became an issue
pass
user.set_password(<PASSWORD>)
user.groups.add(group)
user.save()
sadmin = Admin.objects.create(user=user)
sadmin.save()
# Register Log
log = Log.objects.create_Log(sadmin.user, sadmin.user.username, timezone.now(), "Admin Registered")
log.save()
registered = True
# else:
# print("Error")
# print(user_form.errors)
else:
user_form = UserRegForm()
return render(request, "core/landing/pages/registrationPages/admin_registration.html",
{'user_form': user_form, 'registered': registered, 'stafftype': "admin"})
@login_required
def landing(request):
"""
Renders the landing page
:param request: The request with user information
:return: The page to be rendered
"""
context = {}
parent = get_parent(request)
if 'Patient' in parent:
context['Patient_ID'] = Patient.objects.all().get(user=request.user).id
print("Patient! id is " + str(context['Patient_ID']))
return render(request, 'core/landing/baselanding.html', context)
def register_patient_page(request):
"""
    Registers a patient as well as its one-to-one user. Error-checks fields, and ensures that all data is valid before
creation
:param request: The request with user information
:return: The page to be rendered
"""
registered = False
context = {}
if request.method == 'POST':
user_form = UserRegForm(data=request.POST)
patient_form = PatientRegForm(data=request.POST)
if user_form.is_valid() and patient_form.is_valid():
group = Group.objects.get(name='Patient')
user = user_form.save()
user.set_password(user.password)
user.groups.add(group)
user.save()
pprofile = patient_form.save(commit=False)
pprofile.user = user
if User.objects.filter(email=pprofile.emergency_contact).exists():
pprofile.emergency_contact_user = User.objects.get(email=pprofile.emergency_contact)
pprofile.hospital = pprofile.preferred_hospital
pprofile.save()
# Register Log
log = Log.objects.create_Log(pprofile.user, pprofile.user.username, timezone.now(), "Patient Registered")
log.save()
registered = True
else:
print("Error")
print(user_form.errors, patient_form.errors)
else:
user_form = UserRegForm()
patient_form = PatientRegForm()
context['user_form'] = user_form
context['patient_form'] = patient_form
context['registered'] = registered
return render(request, "core/registerpatient.html", context)
def main(request):
"""
Renders the main page
:param request: The request with user information
:return: The page to be rendered
"""
# return render(request, 'core/main/base.html')
return render(request, 'core/main/homepage.html')
@login_required
def patient_tests(request):
"""
    Redirects to the test results home page
:param request: The request with user information
:return: The page to be rendered
"""
return redirect(reverse('results_home'))
@login_required
def editownprofile(request):
"""
Allows user to update their profile information, plus certain User info
:param request: The request with possible form info
:return: The Edit page
"""
parent = get_parent(request)
person = None
if 'Patient' in parent:
person = Patient.objects.get(user=request.user)
elif 'Doctor' in parent:
person = Doctor.objects.get(user=request.user)
elif 'Nurse' in parent:
person = Nurse.objects.get(user=request.user)
if request.method == 'POST':
user_form = UserUpdateForm(data=request.POST, instance=request.user)
person_form = None
if 'Patient' in parent:
person_form = PatientForm(data=request.POST, instance=person)
elif 'Doctor' in parent:
person_form = DoctorRegForm(data=request.POST, instance=person)
elif 'Nurse' in parent:
person_form = NurseRegForm(data=request.POST, instance=person)
if user_form.is_valid():
user = user_form.save()
if person_form is not None and person_form.is_valid():
pprofile = person_form.save(commit=False)
pprofile.user = user
if person is Patient and User.objects.filter(email=pprofile.emergency_contact).exists():
pprofile.emergency_contact_user = User.objects.get(email=pprofile.emergency_contact)
pprofile.save()
log = Log.objects.create_Log(request.user, request.user.username, timezone.now(),
"User updated own info")
log.save()
else:
# print("Error")
# print(user_form.errors, person_form.errors)
pass
return render(request, 'core/landing/pages/profile.html', {'parent': parent})
else:
user_form = UserUpdateForm(instance=request.user)
person_form = None
if 'Patient' in parent:
person_form = PatientForm(instance=person)
elif 'Doctor' in parent:
person_form = DoctorRegForm(instance=person)
elif 'Nurse' in parent:
person_form = NurseRegForm(instance=person)
return render(request, 'core/landing/pages/edit_profile.html',
{'user_form': user_form, 'patient_form': person_form})
class EditPatientMediInfo(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
"""
EditPatientMediInfo extends UpdateView, which is the generic class for editing preexisting objects
This allows for a user to change their information
"""
model = Patient
template_name = 'core/edit_medi_info.html'
form_class = PatientMediForm
def get_object(self, queryset=None):
p_id = self.kwargs['patient_id']
patient = Patient.objects.get(pk=p_id)
log = Log.objects.create_Log(self.request.user, self.request.user.username, timezone.now(), "Patient(\"" +
patient.user.get_full_name() + "\", id " + p_id + ") Medical Info updated")
log.save()
return patient
def test_func(self):
return is_doctor(self.request.user)
@login_required
@user_passes_test(not_patient)
def view_patients(request):
"""
Simple view for fetching all the patients in the system onto a list and viewing them by name
:param request: The request
:return: rendered list page with patient context
"""
labels = ["Name", "Email", "Birthday", "Hospital", "Admission Status"]
context = {"Patients": Patient.objects.all(), "Labels": labels}
return render(request, 'core/view_patients.html', context)
@login_required
@user_passes_test(is_admin)
def logs(request):
"""
Shows all log objects
:param request: The request with user information
:return: The page to be rendered
"""
Logs = Log.objects.order_by('time').reverse()
return render(request, 'core/logs.html', {'logs': Logs})
def get_parent(request):
"""
A helper method that returns the appropriate parent for the designated user type
:param request: The request with user information
:return: The parent that a template will extend
"""
parent = 'core/landing/Patient.html'
if request.user.groups.filter(name='Doctor').exists():
parent = 'core/landing/Doctor.html'
elif request.user.groups.filter(name='Nurse').exists():
parent = 'core/landing/Nurse.html'
elif request.user.groups.filter(name='Admin').exists():
parent = 'core/landing/Admin.html'
return parent
def swag(request):
return render(request, 'core/landing/pages/registrationPages/swag.html')
@login_required
@user_passes_test(is_doctor_or_nurse)
def admitPatient(request, patient_id):
"""
    Allows Doctors and Nurses to toggle the admission status of existing Patients
:param request: self explanatory
:param patient_id: ID number of the Patient to admit
:return: view_patients list, after Patient has been admitted
"""
patient = Patient.objects.get(pk=patient_id)
if patient.admitted:
patient.admitted = False
else:
patient.admitted = True
patient.save()
labels = ["Name", "Email", "Birthday", "Hospital", "Admission Status"]
context = {"Patients": Patient.objects.all(), "Labels": labels}
return render(request, 'core/view_patients.html', context)
class NewHospital(CreateView, UserPassesTestMixin, LoginRequiredMixin):
"""
View class for Hospital admins to make new hospital instances.
"""
model = Hospital
template_name = 'core/new_hospital.html'
form_class = NewHospitalForm
success_url = reverse_lazy('landing')
def test_func(self):
return is_admin(self.request.user)
class ViewPatientMediInfo(LoginRequiredMixin, UserPassesTestMixin, DetailView):
"""
Viewer for patient Medical Info by nurses or Doctors
"""
model = Patient
template_name = 'core/view_medi_info.html'
def get_context_data(self, **kwargs):
context = super(ViewPatientMediInfo, self).get_context_data(**kwargs)
p_id = self.kwargs['patient_id']
context['Patient'] = Patient.objects.get(pk=p_id)
context['is_doctor'] = is_doctor(self.request.user)
return context
def get_object(self, queryset=None):
p_id = self.kwargs['patient_id']
patient = Patient.objects.get(pk=p_id)
log = Log.objects.create_Log(self.request.user, self.request.user.username, timezone.now(), "Patient(\"" +
patient.user.get_full_name() + "\", id " + p_id + ") Medical Info viewed")
log.save()
return patient
def test_func(self):
return is_doctor_or_nurse(self.request.user)
```
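For orientation, the plain-text layout that `MediInfoExport` writes (and that `read_new_Patient` parses back in via `upload_patient_info`) looks roughly like the hypothetical sample below. None of it comes from the repository: the values are invented, and the exact verbose labels for sex and blood type depend on the Patient model's choice tuples. The parser keys off the first token of each line, treats the Allergies, Medical-History and Prescriptions sections as running until the next blank line, and needs the `Email:` line before the other fields so it can look up the existing Patient record.
```python
# Hypothetical export sample for illustration only; real values come from the
# Patient record and its choice labels.
sample_export = (
    "Name: Jane Doe\r\n"
    "Email: jane@example.com\r\n"
    "Birthday: 1980-01-01\r\n"
    "Sex: Female\r\n"
    "Blood-Type: B-\r\n"
    "Height: 65\r\n"
    "Weight: 180 lbs\r\n"
    "\r\n"
    "Allergies: \r\n"
    "Pollen\r\n"
    "\r\n"
    "Medical-History: \r\n"
    "Type 2 Diabetes\r\n"
    "\r\n"
    "Prescriptions: \r\n"
    "\r\n"
    "Insurance-Info: Geico\r\n"
    "Preferred-Hospital: Hospital1\r\n"
    "Current-Hospital: Hospital1\r\n"
    "Emergency-Contact: contact@example.com\r\n"
)
```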
#### File: HealthNet/messaging/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class MessageManager(models.Manager):
    """
    Message Manager that helps create Messages without too much code
    """
    def createMessage(self, sender, recipient, text, subject):
        # Delegate to the base manager's create(); the required ``date`` field
        # is stamped here so callers do not have to supply it.
        message = self.create(sender=sender, recipient=recipient, text=text,
                              subject=subject, date=timezone.now())
        return message
class Message(models.Model):
"""
    Message Model that creates a message object, using the sender, recipient, text, subject of the message
and stores it in the database
"""
sender = models.ForeignKey(User, related_name='sender')
recipient = models.ForeignKey(User, related_name='recipient')
text = models.CharField(max_length=1000)
subject = models.CharField(max_length=100, null=False)
date = models.DateTimeField()
viewed = models.BooleanField(default=False)
objects = MessageManager()
def __str__(self):
return self.subject
```
#### File: HealthNet/messaging/views.py
```python
from django.contrib.auth.models import User
from django.shortcuts import render
from .models import Message
from .forms import MessageForm
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import UpdateView, DeleteView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
@login_required
def index(request):
"""
displays the main page of the messaging system
:param request: Self explanatory
:return: render containing the html page and all the messages for the user
"""
messages = Message.objects.filter(recipient=request.user).order_by('date').reverse()
return render(request, "messages_main.html", {'messages': messages})
@login_required
def createMessage(request):
"""
Creates a message that can be sent to other users
:param request: Self explanatory
:return: render containing the html page and the info needed for the message to be sent
"""
if request.method == 'POST':
message_form = MessageForm(request.POST)
if message_form.is_valid():
message = message_form.save(commit=False)
message.date = timezone.now()
message.sender = request.user
message_form.save()
return HttpResponseRedirect(reverse_lazy('messages_home'))
else:
message_form = MessageForm()
        message_form.fields['recipient'].queryset = User.objects.all().exclude(pk=request.user.id)
return render(request, 'messages_create.html', {'message_form': message_form})
class UpdateMessage(LoginRequiredMixin, UpdateView):
"""
Allows for messages to be edited
"""
model = Message
template_name = 'messages_edit.html'
form_class = MessageForm
success_url = reverse_lazy('messages_home')
class DeleteMessage(LoginRequiredMixin, DeleteView):
"""
Allows for messages to be deleted
"""
model = Message
template_name = 'messages_delete.html'
success_url = reverse_lazy('messages_home')
```
#### File: HealthNet/prescriptions/models.py
```python
from django.db import models
from django.utils import timezone
from core.models import Patient, Doctor
class Prescription(models.Model):
"""
Defines a Prescription
"""
patient = models.ForeignKey(Patient, verbose_name='Patient')
doctor = models.ForeignKey(Doctor, verbose_name="Doctor")
date_prescribed = models.DateTimeField()
drug = models.CharField(max_length=500)
dosage = models.DecimalField(max_digits=6, decimal_places=3)
DOSE_CHOICES = (
("mg", "milligrams"),
("ug", "micrograms"),
("pg", "picograms"),
("g", "grams")
)
Dose_units = models.CharField(max_length=10, choices=DOSE_CHOICES, default="mg")
rate = models.PositiveSmallIntegerField()
TIME_CHOICES = (
("/D", "per Day"),
("/h", "per Hour"),
("/W", "per Week"),
("/M", "per Month")
)
Time_units = models.CharField(max_length=9, choices=TIME_CHOICES, default="/D")
def __str__(self):
return str(self.patient) + ": " + str(self.rate) + " dose(s) of " + str(self.dosage) + " " + \
str(dict(self.DOSE_CHOICES)[self.Dose_units]) + " of " + str(self.drug) + " " + \
str(dict(self.TIME_CHOICES)[self.Time_units])
@classmethod
def fromString(cls, Prescription_string, patient_id, doctor_user):
"""
Constructs an instance of a Prescription by reading one of its own __str__ exports.
:param Prescription_string: String representation of the Prescription
:param patient_id: ID of the patient being prescribed to
:param doctor_user: User of Doctor who is signing off on this import.
:return: Saved Prescription
"""
words = Prescription_string.split(" ")
if len(words) != 11:
print("Cannot make prescription from String [" + Prescription_string + "]")
return None
patient = Patient.objects.get(pk=patient_id)
rate = int(words[2])
dosage = float(words[5])
Dose_units = "mg"
Dose_units_verbose = words[6]
for d in Prescription.DOSE_CHOICES:
if d[1] == Dose_units_verbose:
Dose_units = d[0]
drug = words[8]
Time_units = "/D"
Time_units_verbose = words[9] + " " + words[10]
for t in Prescription.TIME_CHOICES:
if t[1] == Time_units_verbose:
Time_units = t[0]
tosave = cls(patient=patient, doctor=Doctor.objects.get(user=doctor_user), date_prescribed=timezone.now(),
drug=drug, dosage=dosage, Dose_units=Dose_units, rate=rate, Time_units=Time_units)
tosave.save()
return tosave
```
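As a sanity check on `fromString`, the sketch below (not part of the repository) walks the token positions it relies on when inverting `__str__`. The 11-token requirement implicitly assumes that `str(patient)` renders as exactly two words (for example a first and last name) and that the drug name contains no spaces; otherwise the indices shift and the import is skipped.
```python
# Worked example of the indices fromString reads; the line mimics __str__ output
# for a hypothetical patient "Jane Doe".
line = "Jane Doe: 2 dose(s) of 50.000 milligrams of Ibuprofen per Day"
words = line.split(" ")
assert len(words) == 11
rate = int(words[2])                             # 2
dosage = float(words[5])                         # 50.0
dose_units_verbose = words[6]                    # "milligrams" -> stored as "mg"
drug = words[8]                                  # "Ibuprofen"
time_units_verbose = words[9] + " " + words[10]  # "per Day" -> stored as "/D"
```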
#### File: HealthNet/prescriptions/views.py
```python
from django.shortcuts import redirect
from .forms import PrescriptionForm
from core.views import is_doctor, is_nurse, is_admin, is_patient
from core.models import *
from .models import Prescription
from django.contrib.auth.decorators import login_required, user_passes_test
from django.utils import timezone
from django.shortcuts import render
from django.core.urlresolvers import reverse
def not_admin(user):
"""
:param user: The User in question
:return: True if the user is anything but an Admin
"""
return not is_admin(user)
def is_doctor_or_nurse(user):
"""
:param user: The User in question
:return: True if the user is a Doctor or Nurse
"""
return is_doctor(user) or is_nurse(user)
@login_required
@user_passes_test(is_doctor)
def new_prescription(request):
"""
Page for the form a doctor fills out to prescribe a drug
:param request: the request with possible form submission
:return: Prescription form or redirect to listing page (below)
"""
if request.method == 'POST':
prescription_form = PrescriptionForm(data=request.POST)
validity = prescription_form.is_valid()
if validity:
prescription = prescription_form.save(commit=False)
prescription.date_prescribed = timezone.now()
prescription.doctor = Doctor.objects.all().get(user=request.user)
prescription.save()
log = Log.objects.create_Log(request.user, request.user.username, timezone.now(),
"Prescription filled out")
log.save()
else:
print("Error")
print(prescription_form.errors)
if 'submit_singular' in request.POST and validity:
return redirect('prescriptions')
elif 'submit_another' in request.POST:
prescription_form = PrescriptionForm()
else:
prescription_form = PrescriptionForm()
context = {"prescription_form": prescription_form}
return render(request, 'prescriptions/makenew.html', context)
def get_prescription_list_for(cpatient):
"""
Generic getter for a specific patient's prescription list
:param cpatient: Patient to fetch list for
:return: context of Prescription list
"""
Prescriptions = Prescription.objects.all().filter(patient=cpatient)
per = []
for p in Prescriptions.iterator():
per.append(str(dict(p.TIME_CHOICES)[p.Time_units]))
p_list = zip(Prescriptions, per)
return {"Labels": ["Doctor", "Drug", "Dosage", "Rate"], "Name": str(cpatient), "Prescriptions": p_list}
@login_required
@user_passes_test(not_admin)
def prescriptions(request):
"""
Lists either all patients in the hospital with links to their prescription lists, or the prescriptions applied to a
single defined patient.
:param request: The request sent in, not used here
:return: List page rendering
"""
context = {}
if is_doctor(request.user) or is_nurse(request.user):
context["Labels"] = ["Name", "Prescriptions"]
patients = Patient.objects.all()
prescription_nums = []
for pat in patients.iterator():
prescription_nums.append(Prescription.objects.filter(patient=pat).count())
context["Patients"] = zip(patients, prescription_nums)
elif is_patient(request.user):
cpatient = Patient.objects.get(user=request.user)
context = get_prescription_list_for(cpatient)
    context["is_doctor"] = is_doctor(request.user)
return render(request, 'prescriptions/list.html', context)
@login_required
@user_passes_test(is_doctor_or_nurse)
def prescriptions_list(request, patient_id):
"""
Page that doctors and nurses are sent to when accessing a single patient's prescription list.
:param request: The request sent in, not used here
:param patient_id: ID of the patient who's being listed
:return: List page rendering
"""
cpatient = Patient.objects.get(pk=patient_id)
context = get_prescription_list_for(cpatient)
context["is_doctor"] = is_doctor(request.user)
return render(request, 'prescriptions/list.html', context)
@login_required
@user_passes_test(is_doctor)
def delete_prescription(request, prescription_id):
"""
Page for confirming/deleting a single prescription
:param request: The request sent in, not used here
:param prescription_id: ID number of the prescription in question
:return: Redirect or confirmation page
"""
prescription = Prescription.objects.get(pk=prescription_id)
patient_id = prescription.patient.id
if request.method == 'POST':
prescription.delete()
return redirect(reverse('list prescriptions for patient', kwargs={'patient_id': patient_id}))
context = {"Prescription": prescription, 'patient_id': patient_id}
return render(request, 'prescriptions/delete.html', context)
```
#### File: HealthNet/sysstats/tests.py
```python
from django.core.urlresolvers import reverse
from django.test import TestCase
from core.models import *
class TheOnlyTestThisNeeds(TestCase):
"""
The only test that this needs
"""
@classmethod
def setUpTestData(cls):
Group.objects.create(name='Patient')
Group.objects.create(name='Doctor')
Group.objects.create(name='Nurse')
Group.objects.create(name='Admin')
cls.hospital = Hospital.objects.create(name="Hospital1")
cls.P_user = User.objects.create(username="TestPatient", password="password", email="<EMAIL>",
first_name="PTest", last_name="LastName")
cls.N_user = User.objects.create(username="TestNurse", password="password", email="<EMAIL>",
first_name="NTest", last_name="LastName")
cls.D_user = User.objects.create(username="TestDoctor", password="password", email="<EMAIL>",
first_name="DTest", last_name="LastName")
cls.A_user = User.objects.create(username="TestAdmin", password="password", email="<EMAIL>",
first_name="ATest", last_name="LastName")
cls.patient = Patient.objects.create(user=cls.P_user, birthday="1980-1-1", sex="M", blood_type="B-",
height="65", weight="180", allergies="Pollen",
medical_history="Type 2 Diabetes", insurance_info="Geico",
hospital=cls.hospital, emergency_contact="<EMAIL>"
)
# print(cls.P_user.groups)
cls.nurse = Nurse.objects.create(user=cls.N_user, hospital=cls.hospital, phoneNum="2408937770")
# print(cls.N_user.groups)
cls.doctor = Doctor.objects.create(user=cls.D_user, hospital=cls.hospital, phoneNum="4438483228")
# print(cls.D_user.groups)
cls.admin = Admin.objects.create(user=cls.A_user)
# print(cls.A_user.groups)
def test_denying_nologinuser(self):
response = self.client.get(reverse('statistics'), follow=True)
self.assertRedirects(response, '/login/?next=/stats/')
response = self.client.post(reverse('statistics'), follow=True)
self.assertRedirects(response, '/login/?next=/stats/')
def test_valid_call(self):
self.client.login(username='TestAdmin', password='password')
response = self.client.get(reverse('statistics'))
self.assertTrue(response.status_code < 400)
```
#### File: HealthNet/testResults/views.py
```python
from django.contrib.auth.decorators import login_required, user_passes_test
from django.shortcuts import render
from core.views import is_patient, is_doctor
from .forms import ResultForm
from .models import Results
from core.models import Log, Patient
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import UpdateView, DeleteView
from django.utils import timezone
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
def is_doctor_or_patient(user):
"""
Checks if user logged in is of type doctor or patient
:param user: user logged in
:return: True if user is a doctor or patient
"""
return is_doctor(user) or is_patient(user)
@login_required
@user_passes_test(is_doctor_or_patient)
def index(request):
"""
displays the main page of the test results system
:param request: Self explanatory
:return: render containing the html page and all the tests for the user
"""
results = Results.objects.order_by('date').reverse()
return render(request, "results_main.html", {'results': results})
@login_required
@user_passes_test(is_doctor)
def createResult(request):
"""
Creates a Test Result that can be released to a specific patient
:param request: Self explanatory
:return: render containing the html page and the info needed for the test result
"""
if request.method == 'POST':
results_form = ResultForm(request.POST, request.FILES)
if results_form.is_valid():
result = results_form.save(commit=False)
result.doctor = request.user
result.date = timezone.now()
result.file = request.FILES['files']
results_form.save()
# Register Log
log = Log.objects.create_Log(request.user, request.user.username, timezone.now(),
request.user.username + " created Test Result")
log.save()
return HttpResponseRedirect(reverse_lazy('results_home'))
else:
results_form = ResultForm()
return render(request, 'results_create.html', {'results_form' : results_form})
class UpdateTest(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
"""
Allows for edits to be made to the Test Result
"""
model = Results
template_name = 'results_edit.html'
form_class = ResultForm
success_url = reverse_lazy('results_home')
def test_func(self):
return is_doctor(self.request.user)
class DeleteTest(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
"""
Allows for the test result to be deleted
"""
model = Results
template_name = 'results_delete.html'
success_url = reverse_lazy('results_home')
def test_func(self):
return is_doctor(self.request.user)
@login_required
@user_passes_test(is_patient)
def view_for_patient(request):
"""
    Display specifically for patients, to make sure they can't create tests themselves
:param request: Self explanatory
:return: render containing the html page and all the tests for the patient
"""
patient = Patient.objects.all().get(user=request.user)
results = Results.objects.all().filter(patient=patient).filter(released=True).order_by('date').reverse()
print(str(results))
return render(request, "results_main.html", {'results': results})
``` |
{
"source": "JIM-GLITCH/ant-simulator",
"score": 3
} |
#### File: JIM-GLITCH/ant-simulator/runtest-searchmap.py
```python
import subprocess
mapnumber=4
antComAlgorithm=1
numAnts=100
mapMaxX =160
mapMaxY =320
trial=1
TEST='searchmap'
def run_one_test(trial):
    print("map {} algorithm {} trial {}: running..".format(mapnumber,antComAlgorithm,trial))
taskFinishedTime=subprocess.call(['love','./', str(mapnumber),str(antComAlgorithm),str(numAnts),str(mapMaxX),str(mapMaxY),TEST,str(trial) ], shell=True)
# with open("map{}_algorithm{}_ants{}.txt".format(mapnumber,antComAlgorithm,numAnts),'a') as f:
# f.write('{}\n'.format(taskFinishedTime))
return taskFinishedTime
for mapMaxX,mapMaxY in [(160,320),(320,320),(320,800),(640,800)]:
# for numAnts in [800,400,200,100]:
# for trial in range(1,10+1):
run_one_test(trial)
``` |
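The commented-out lines hint at the full sweep this script is built for. A sketch of that sweep, re-enabled and logging each run's return value per configuration, might look like this; it reuses the globals defined above and is not part of the repository:
```python
for mapMaxX, mapMaxY in [(160, 320), (320, 320), (320, 800), (640, 800)]:
    for numAnts in [800, 400, 200, 100]:
        for trial in range(1, 10 + 1):
            result = run_one_test(trial)
            # Append the return value of each run to a per-configuration file.
            with open("map{}_algorithm{}_ants{}.txt".format(
                    mapnumber, antComAlgorithm, numAnts), "a") as f:
                f.write("{}\n".format(result))
```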
{
"source": "jimgong92/allezViens",
"score": 3
} |
#### File: jimgong92/allezViens/communication.py
```python
from flask.ext.mail import Mail, Message
from run import mail
#Sends email notifying user about posted route
def sendValidationEmail(to, url):
header = 'Allez Viens Validation'
sender = '<EMAIL>'
replyTo = '<EMAIL>'
url = 'allez-viens.herokuapp.com/trip/' + url
body = "Please click <a href='" + url + "'>this link</a> to validate and edit your route.</br> If you did not request this, please disregard this email."
sendEmail([to], replyTo, sender, header, body)
#Sends email notifying user about pick
def sendPickNotificationEmail(to, replyTo, url):
header = 'Allez Viens User Contacted You'
sender = '<EMAIL>'
url = 'allez-viens.herokuapp.com/trip/' + url
body = "A user at Allez Viens has expressed interest in riding with you regarding <a href='" + url + "'>this route.</a> <br><br>Replying to this message will reply directly to the user."
sendEmail([to], replyTo, sender, header, body)
#Sends email via smtp service using template
def sendEmail(to, replyTo, sender, header, body):
msg = Message(
header,
recipients=to,
reply_to = replyTo,
sender = sender
)
msg.body = "body"
msg.html = body
mail.send(msg)
#For Development, uncomment to use function from command line
# with app.app_context():
# mail.send(msg)
``` |
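Because Flask-Mail only sends inside an application context, calling these helpers outside a request (as the commented-out development snippet suggests) needs an explicit context. A minimal sketch, assuming `run.py` also exposes the Flask `app` alongside `mail` and that the `MAIL_*` settings are configured:
```python
from run import app
from communication import sendValidationEmail

with app.app_context():
    # The address and token below are placeholders for illustration.
    sendValidationEmail("driver@example.com", "abc123-edit-token")
```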
{
"source": "jimgoo/auto-sklearn",
"score": 2
} |
#### File: components/regression/libsvm_svr.py
```python
import resource
import numpy as np
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.conditions import InCondition
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LibSVM_SVR(AutoSklearnRegressionAlgorithm):
def __init__(self, kernel, C, epsilon, tol, shrinking, gamma=0.1,
degree=3, coef0=0.0, verbose=False,
max_iter=-1, random_state=None):
self.kernel = kernel
self.C = C
self.epsilon = epsilon
self.tol = tol
self.shrinking = shrinking
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
self.estimator = None
def fit(self, X, Y):
        import sklearn.svm
        import sklearn.preprocessing
try:
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
if soft > 0:
soft /= 1024 * 1024
maxrss = resource.getrusage(resource.RUSAGE_SELF)[2] / 1024
cache_size = (soft - maxrss) / 1.5
else:
cache_size = 200
except Exception:
cache_size = 200
self.C = float(self.C)
self.epsilon = float(self.epsilon)
self.tol = float(self.tol)
self.shrinking = self.shrinking == 'True'
self.degree = int(self.degree)
self.gamma = float(self.gamma)
if self.coef0 is None:
self.coef0 = 0.0
else:
self.coef0 = float(self.coef0)
self.verbose = int(self.verbose)
self.max_iter = int(self.max_iter)
self.estimator = sklearn.svm.SVR(
kernel=self.kernel,
C=self.C,
epsilon=self.epsilon,
tol=self.tol,
shrinking=self.shrinking,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
cache_size=cache_size,
verbose=self.verbose,
max_iter=self.max_iter
)
self.scaler = sklearn.preprocessing.StandardScaler(copy=True)
self.scaler.fit(Y.reshape((-1, 1)))
Y_scaled = self.scaler.transform(Y.reshape((-1, 1))).ravel()
self.estimator.fit(X, Y_scaled)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
if self.scaler is None:
raise NotImplementedError
Y_pred = self.estimator.predict(X)
return self.scaler.inverse_transform(Y_pred)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'SVR',
'name': 'Support Vector Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'prefers_data_normalized': True,
'is_deterministic': True,
'input': (SPARSE, DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
# Copied from libsvm_c
C = UniformFloatHyperparameter(
name="C", lower=0.03125, upper=32768, log=True, default=1.0)
kernel = CategoricalHyperparameter(
name="kernel", choices=['linear', 'poly', 'rbf', 'sigmoid'],
default="rbf")
degree = UniformIntegerHyperparameter(
name="degree", lower=1, upper=5, default=3)
# Changed the gamma value to 0.0 (is 0.1 for classification)
gamma = UniformFloatHyperparameter(
name="gamma", lower=3.0517578125e-05, upper=8, log=True, default=0.1)
# TODO this is totally ad-hoc
coef0 = UniformFloatHyperparameter(
name="coef0", lower=-1, upper=1, default=0)
# probability is no hyperparameter, but an argument to the SVM algo
shrinking = CategoricalHyperparameter(
name="shrinking", choices=["True", "False"], default="True")
tol = UniformFloatHyperparameter(
name="tol", lower=1e-5, upper=1e-1, default=1e-3, log=True)
max_iter = UnParametrizedHyperparameter("max_iter", -1)
# Random Guess
epsilon = UniformFloatHyperparameter(name="epsilon", lower=0.001,
upper=1, default=0.1, log=True)
cs = ConfigurationSpace()
cs.add_hyperparameters([C, kernel, degree, gamma, coef0, shrinking,
tol, max_iter, epsilon])
degree_depends_on_kernel = InCondition(child=degree, parent=kernel,
values=('poly', 'rbf', 'sigmoid'))
gamma_depends_on_kernel = InCondition(child=gamma, parent=kernel,
values=('poly', 'rbf'))
coef0_depends_on_kernel = InCondition(child=coef0, parent=kernel,
values=('poly', 'sigmoid'))
cs.add_conditions([degree_depends_on_kernel, gamma_depends_on_kernel,
coef0_depends_on_kernel])
return cs
```
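For a rough idea of how this component behaves on its own (auto-sklearn normally drives it through its pipeline machinery), one can sample a configuration from the search space above and fit it on toy data. This is only a sketch; the data, seed, and direct instantiation are illustrative assumptions:
```python
import numpy as np

cs = LibSVM_SVR.get_hyperparameter_search_space()
config = cs.sample_configuration()            # respects the kernel conditions

rng = np.random.RandomState(1)
X = rng.rand(100, 5)
y = X @ np.arange(1.0, 6.0)                   # simple linear target

model = LibSVM_SVR(**config.get_dictionary(), random_state=1)
model.fit(X, y)
predictions = model.predict(X)
```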
#### File: test/test_metric/test_metrics.py
```python
import unittest
import numpy as np
import sklearn.metrics
import autosklearn.metrics.classification_metrics
class TestScorer(unittest.TestCase):
def test_predict_scorer_binary(self):
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
scorer = autosklearn.metrics._PredictScorer(
'accuracy', sklearn.metrics.accuracy_score, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0)
y_pred = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
y_pred = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
scorer = autosklearn.metrics._PredictScorer(
'bac', autosklearn.metrics.classification_metrics.balanced_accuracy,
1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
scorer = autosklearn.metrics._PredictScorer(
'accuracy', sklearn.metrics.accuracy_score, -1, {})
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -1.0)
def test_predict_scorer_multiclass(self):
y_true = np.array([0, 1, 2])
y_pred = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
scorer = autosklearn.metrics._PredictScorer(
'accuracy', sklearn.metrics.accuracy_score, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0)
y_pred = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.333333333)
y_pred = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.333333333)
scorer = autosklearn.metrics._PredictScorer(
'bac', autosklearn.metrics.classification_metrics.balanced_accuracy,
1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.333333333)
scorer = autosklearn.metrics._PredictScorer(
'accuracy', sklearn.metrics.accuracy_score, -1, {})
y_pred = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -1.0)
def test_predict_scorer_multilabel(self):
y_true = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_pred = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
scorer = autosklearn.metrics._PredictScorer(
'accuracy', sklearn.metrics.accuracy_score, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0)
y_pred = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.25)
y_pred = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.25)
scorer = autosklearn.metrics._PredictScorer(
'bac', autosklearn.metrics.classification_metrics.balanced_accuracy,
1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
scorer = autosklearn.metrics._PredictScorer(
'accuracy', sklearn.metrics.accuracy_score, -1, {})
y_pred = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -1.0)
def test_predict_scorer_regression(self):
y_true = np.arange(0, 1.01, 0.1)
y_pred = y_true.copy()
scorer = autosklearn.metrics._PredictScorer(
'r2', sklearn.metrics.r2_score, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0)
y_pred = np.ones(y_true.shape) * np.mean(y_true)
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.0)
def test_proba_scorer_binary(self):
y_true = [0, 0, 1, 1]
y_pred = [[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]]
scorer = autosklearn.metrics._ProbaScorer(
'accuracy', sklearn.metrics.log_loss, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.0)
y_pred = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.69314718055994529)
y_pred = [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.69314718055994529)
scorer = autosklearn.metrics._ProbaScorer(
'accuracy', sklearn.metrics.log_loss, -1, {})
y_pred = [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -0.69314718055994529)
def test_proba_scorer_multiclass(self):
y_true = [0, 1, 2]
y_pred = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
scorer = autosklearn.metrics._ProbaScorer(
'accuracy', sklearn.metrics.log_loss, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.0)
y_pred = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0986122886681098)
y_pred = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0986122886681096)
scorer = autosklearn.metrics._ProbaScorer(
'accuracy', sklearn.metrics.log_loss, -1, {})
y_pred = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -1.0986122886681096)
def test_proba_scorer_multilabel(self):
y_true = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_pred = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
scorer = autosklearn.metrics._ProbaScorer(
'accuracy', sklearn.metrics.log_loss, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.34657359027997314)
y_pred = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.69314718055994529)
y_pred = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.69314718055994529)
scorer = autosklearn.metrics._ProbaScorer(
'accuracy', sklearn.metrics.log_loss, -1, {})
y_pred = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -0.34657359027997314)
def test_threshold_scorer_binary(self):
y_true = [0, 0, 1, 1]
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
scorer = autosklearn.metrics._ThresholdScorer(
'accuracy', sklearn.metrics.roc_auc_score, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0)
y_pred = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
y_pred = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
scorer = autosklearn.metrics._ThresholdScorer(
'accuracy', sklearn.metrics.roc_auc_score, -1, {})
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -1.0)
def test_threshold_scorer_multilabel(self):
y_true = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_pred = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
scorer = autosklearn.metrics._ThresholdScorer(
'accuracy', sklearn.metrics.roc_auc_score, 1, {})
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0)
y_pred = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
y_pred = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 0.5)
scorer = autosklearn.metrics._ThresholdScorer(
'accuracy', sklearn.metrics.roc_auc_score, -1, {})
y_pred = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -1.0)
def test_sign_flip(self):
y_true = np.arange(0, 1.01, 0.1)
y_pred = y_true.copy()
scorer = autosklearn.metrics.make_scorer(
'r2', sklearn.metrics.r2_score, greater_is_better=True)
score = scorer(y_true, y_pred + 1.0)
self.assertAlmostEqual(score, -9.0)
score = scorer(y_true, y_pred + 0.5)
self.assertAlmostEqual(score, -1.5)
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, 1.0)
scorer = autosklearn.metrics.make_scorer(
'r2', sklearn.metrics.r2_score, greater_is_better=False)
score = scorer(y_true, y_pred + 1.0)
self.assertAlmostEqual(score, 9.0)
score = scorer(y_true, y_pred + 0.5)
self.assertAlmostEqual(score, 1.5)
score = scorer(y_true, y_pred)
self.assertAlmostEqual(score, -1.0)
class TestMetricsDoNotAlterInput(unittest.TestCase):
def test_regression_metrics(self):
for metric, scorer in autosklearn.metrics.REGRESSION_METRICS.items():
y_true = np.random.random(100).reshape((-1, 1))
y_pred = y_true.copy() + np.random.randn(100, 1) * 0.1
y_true_2 = y_true.copy()
y_pred_2 = y_pred.copy()
self.assertTrue(np.isfinite(scorer(y_true_2, y_pred_2)))
np.testing.assert_array_almost_equal(y_true, y_true_2,
err_msg=metric)
np.testing.assert_array_almost_equal(y_pred, y_pred_2,
err_msg=metric)
def test_classification_metrics(self):
for metric, scorer in autosklearn.metrics.CLASSIFICATION_METRICS.items():
y_true = np.random.randint(0, 2, size=(100, 1))
y_pred = np.random.random(200).reshape((-1, 2))
y_pred = np.array([y_pred[i] / np.sum(y_pred[i])
for i in range(100)])
y_true_2 = y_true.copy()
y_pred_2 = y_pred.copy()
try:
self.assertTrue(np.isfinite(scorer(y_true_2, y_pred_2)))
np.testing.assert_array_almost_equal(y_true, y_true_2,
err_msg=metric)
np.testing.assert_array_almost_equal(y_pred, y_pred_2,
err_msg=metric)
except ValueError as e:
if e.args[0] == 'Sample-based precision, recall, fscore is ' \
'not meaningful outside multilabel ' \
'classification. See the accuracy_score instead.':
pass
else:
raise e
``` |
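The `test_sign_flip` case above encodes the scorers' central convention: with `greater_is_better=False` the wrapped metric is negated, so a larger scorer value is always better regardless of whether the underlying metric is a score or an error. A small standalone sketch of the same behaviour, assuming `autosklearn.metrics` is importable as in these tests:
```python
import numpy as np
import sklearn.metrics
import autosklearn.metrics

y_true = np.arange(0, 1.01, 0.1)
y_pred = y_true + 1.0  # systematically off by one -> R^2 of -9 on this data

scorer = autosklearn.metrics.make_scorer(
    'r2', sklearn.metrics.r2_score, greater_is_better=True)
print(scorer(y_true, y_pred))   # -9.0, same sign as the raw metric

scorer = autosklearn.metrics.make_scorer(
    'r2', sklearn.metrics.r2_score, greater_is_better=False)
print(scorer(y_true, y_pred))   # 9.0, sign flipped so that higher is better
```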
{
"source": "jimgoo/Merlion",
"score": 2
} |
#### File: ts_datasets/anomaly/nab.py
```python
import datetime
import glob
import json
import logging
import os
import re
import requests
import numpy as np
import pandas as pd
import tqdm
from ts_datasets.anomaly.base import TSADBaseDataset
logger = logging.getLogger(__name__)
class NAB(TSADBaseDataset):
"""
Wrapper to load datasets found in the Numenta Anomaly Benchmark
(https://github.com/numenta/NAB).
    The NAB contains a range of datasets, which are categorized by their domains.
"""
valid_subsets = [
"all",
"artificial",
"artificialWithAnomaly",
"realAWSCloudwatch",
"realAdExchange",
"realKnownCause",
"realTraffic",
"realTweets",
]
def __init__(self, subset="all", rootdir=None):
"""
        :param subset: One of the elements in ``valid_subsets``.
:param rootdir: The root directory at which the dataset can be found.
"""
super().__init__()
assert subset in self.valid_subsets, f"subset should be in {self.valid_subsets}, but got {subset}"
self.subset = subset
if rootdir is None:
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, "..", "..", ".."))
rootdir = os.path.join(merlion_root, "data", "nab")
if subset == "artificial":
subsets = ["artificialNoAnomaly", "artificialWithAnomaly"]
elif subset == "all":
subsets = [
"artificialNoAnomaly",
"artificialWithAnomaly",
"realAWSCloudwatch",
"realAdExchange",
"realKnownCause",
"realTraffic",
"realTweets",
]
else:
subsets = [subset]
self.download(rootdir, subsets)
dsetdirs = [os.path.join(rootdir, s) for s in subsets]
labelfile = os.path.join(rootdir, "labels/combined_windows.json")
with open(labelfile) as json_file:
label_list = json.load(json_file)
csvs = sum([sorted(glob.glob(f"{d}/*.csv")) for d in dsetdirs], [])
for i, csv in enumerate(sorted(csvs)):
df = pd.read_csv(csv)
df.iloc[:, 0] = pd.to_datetime(df.iloc[:, 0])
df = df.sort_values(by="timestamp")
if len(df["timestamp"][df["timestamp"].diff() == datetime.timedelta(0)]) != 0:
df = df.drop_duplicates(subset="timestamp", keep="first")
logger.warning(f"Time series {csv} (index {i}) has timestamp duplicates. Kept first values.")
all_dt = np.unique(np.diff(df["timestamp"])).astype(int)
gcd_dt = all_dt[0]
for dt in all_dt[1:]:
gcd_dt = np.gcd(gcd_dt, dt)
gcd_dt = pd.to_timedelta(gcd_dt)
labels = self.load_labels(csv, label_list, gcd_dt)
df["anomaly"] = df["timestamp"].apply(lambda x: x in labels)
df = df.set_index("timestamp")
# First 15% of data is "probationary", i.e. model can use it to
# warm-start without being tested. See Figure 2 of the NAB
# paper https://arxiv.org/pdf/1510.03336.pdf
n = len(df) * 0.15
df["trainval"] = pd.Series(np.arange(len(df)) <= n, index=df.index)
md_cols = ["anomaly", "trainval"]
self.metadata.append(df[md_cols])
self.time_series.append(df[[c for c in df.columns if c not in md_cols]])
@staticmethod
def load_labels(datafile, label_list, freq):
filename = "/".join(re.sub(r"\\", "/", datafile).split("/")[-2:])
label_list = label_list[filename]
labels = pd.DatetimeIndex([])
for lp in label_list:
start = pd.to_datetime(lp[0])
end = pd.to_datetime(lp[1])
labels = labels.append(pd.date_range(start=start, end=end, freq=freq))
return labels
@property
def max_lead_sec(self):
"""
The anomalies in the NAB dataset are already windows which permit early
detection. So we explicitly disallow any earlier detection.
"""
return 0
def download(self, rootdir, subsets):
csvs = [
"artificialNoAnomaly/art_daily_no_noise.csv",
"artificialNoAnomaly/art_daily_perfect_square_wave.csv",
"artificialNoAnomaly/art_daily_small_noise.csv",
"artificialNoAnomaly/art_flatline.csv",
"artificialNoAnomaly/art_noisy.csv",
"artificialWithAnomaly/art_daily_flatmiddle.csv",
"artificialWithAnomaly/art_daily_jumpsdown.csv",
"artificialWithAnomaly/art_daily_jumpsup.csv",
"artificialWithAnomaly/art_daily_nojump.csv",
"artificialWithAnomaly/art_increase_spike_density.csv",
"artificialWithAnomaly/art_load_balancer_spikes.csv",
"realAWSCloudwatch/ec2_cpu_utilization_24ae8d.csv",
"realAWSCloudwatch/ec2_cpu_utilization_53ea38.csv",
"realAWSCloudwatch/ec2_cpu_utilization_5f5533.csv",
"realAWSCloudwatch/ec2_cpu_utilization_77c1ca.csv",
"realAWSCloudwatch/ec2_cpu_utilization_825cc2.csv",
"realAWSCloudwatch/ec2_cpu_utilization_ac20cd.csv",
"realAWSCloudwatch/ec2_cpu_utilization_c6585a.csv",
"realAWSCloudwatch/ec2_cpu_utilization_fe7f93.csv",
"realAWSCloudwatch/ec2_disk_write_bytes_1ef3de.csv",
"realAWSCloudwatch/ec2_disk_write_bytes_c0d644.csv",
"realAWSCloudwatch/ec2_network_in_257a54.csv",
"realAWSCloudwatch/ec2_network_in_5abac7.csv",
"realAWSCloudwatch/elb_request_count_8c0756.csv",
"realAWSCloudwatch/grok_asg_anomaly.csv",
"realAWSCloudwatch/iio_us-east-1_i-a2eb1cd9_NetworkIn.csv",
"realAWSCloudwatch/rds_cpu_utilization_cc0c53.csv",
"realAWSCloudwatch/rds_cpu_utilization_e47b3b.csv",
"realAdExchange/exchange-2_cpc_results.csv",
"realAdExchange/exchange-2_cpm_results.csv",
"realAdExchange/exchange-3_cpc_results.csv",
"realAdExchange/exchange-3_cpm_results.csv",
"realAdExchange/exchange-4_cpc_results.csv",
"realAdExchange/exchange-4_cpm_results.csv",
"realKnownCause/ambient_temperature_system_failure.csv",
"realKnownCause/cpu_utilization_asg_misconfiguration.csv",
"realKnownCause/ec2_request_latency_system_failure.csv",
"realKnownCause/machine_temperature_system_failure.csv",
"realKnownCause/nyc_taxi.csv",
"realKnownCause/rogue_agent_key_hold.csv",
"realKnownCause/rogue_agent_key_updown.csv",
"realTraffic/TravelTime_387.csv",
"realTraffic/TravelTime_451.csv",
"realTraffic/occupancy_6005.csv",
"realTraffic/occupancy_t4013.csv",
"realTraffic/speed_6005.csv",
"realTraffic/speed_7578.csv",
"realTraffic/speed_t4013.csv",
"realTweets/Twitter_volume_AAPL.csv",
"realTweets/Twitter_volume_AMZN.csv",
"realTweets/Twitter_volume_CRM.csv",
"realTweets/Twitter_volume_CVS.csv",
"realTweets/Twitter_volume_FB.csv",
"realTweets/Twitter_volume_GOOG.csv",
"realTweets/Twitter_volume_IBM.csv",
"realTweets/Twitter_volume_KO.csv",
"realTweets/Twitter_volume_PFE.csv",
"realTweets/Twitter_volume_UPS.csv",
]
labelfile = "labels/combined_windows.json"
path = os.path.join(rootdir, labelfile)
if not os.path.isfile(path):
print("Downloading label file...")
os.makedirs(os.path.dirname(path), exist_ok=True)
url = f"https://github.com/numenta/NAB/raw/master/{labelfile}"
r = requests.get(url, stream=True)
with open(path, "wb") as f:
for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
csvs = [f for f in csvs if not os.path.isfile(os.path.join(rootdir, f)) and f.split("/")[0] in subsets]
for csv in tqdm.tqdm(csvs, desc="NAB Download", disable=len(csvs) == 0):
path = os.path.join(rootdir, csv)
if not os.path.isfile(path):
os.makedirs(os.path.dirname(path), exist_ok=True)
url = f"https://github.com/numenta/NAB/raw/master/data/{csv}"
r = requests.get(url, stream=True)
with open(path, "wb") as f:
for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
``` |
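A sketch of how this loader is typically driven, assuming the `ts_datasets` package is installed and the machine can reach GitHub for the one-time download of any missing CSVs and labels (the `realTweets` choice is arbitrary):
```python
from ts_datasets.anomaly import NAB

dataset = NAB(subset="realTweets")

# Each entry pairs a univariate time series with per-timestamp metadata
# ("anomaly" flags and the "trainval" probationary mask built above).
for ts, md in zip(dataset.time_series, dataset.metadata):
    train, test = ts[md.trainval], ts[~md.trainval]
    print(ts.columns.tolist(), len(train), len(test), int(md.anomaly.sum()))
    break
```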
{
"source": "jimgoo/zipline-fork",
"score": 2
} |
#### File: tests/pipeline/test_factor.py
```python
from nose_parameterized import parameterized
from numpy import arange, array, empty, eye, nan, ones, datetime64
from numpy.random import randn, seed
from zipline.errors import UnknownRankMethod
from zipline.pipeline import Factor, Filter, TermGraph
from zipline.pipeline.factors import RSI
from zipline.utils.test_utils import check_allclose, check_arrays
from .base import BasePipelineTestCase
class F(Factor):
inputs = ()
window_length = 0
class Mask(Filter):
inputs = ()
window_length = 0
class FactorTestCase(BasePipelineTestCase):
def setUp(self):
super(FactorTestCase, self).setUp()
self.f = F()
def test_bad_input(self):
with self.assertRaises(UnknownRankMethod):
self.f.rank("not a real rank method")
def test_rank_ascending(self):
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=float)
expected_ranks = {
'ordinal': array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
'average': array([[1.5, 3., 4., 5., 1.5],
[2.5, 4., 5., 1., 2.5],
[3.5, 5., 1., 2., 3.5],
[4.5, 1., 2., 3., 4.5],
[1.5, 3., 4., 5., 1.5]]),
'min': array([[1., 3., 4., 5., 1.],
[2., 4., 5., 1., 2.],
[3., 5., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 3., 4., 5., 1.]]),
'max': array([[2., 3., 4., 5., 2.],
[3., 4., 5., 1., 3.],
[4., 5., 1., 2., 4.],
[5., 1., 2., 3., 5.],
[2., 3., 4., 5., 2.]]),
'dense': array([[1., 2., 3., 4., 1.],
[2., 3., 4., 1., 2.],
[3., 4., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 2., 3., 4., 1.]]),
}
def check(terms):
graph = TermGraph(terms)
results = self.run_graph(
graph,
initial_workspace={self.f: data},
mask=self.build_mask(ones((5, 5))),
)
for method in terms:
check_arrays(results[method], expected_ranks[method])
check({meth: self.f.rank(method=meth) for meth in expected_ranks})
check({
meth: self.f.rank(method=meth, ascending=True)
for meth in expected_ranks
})
# Not passing a method should default to ordinal.
check({'ordinal': self.f.rank()})
check({'ordinal': self.f.rank(ascending=True)})
def test_rank_descending(self):
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=float)
expected_ranks = {
'ordinal': array([[4., 3., 2., 1., 5.],
[3., 2., 1., 5., 4.],
[2., 1., 5., 4., 3.],
[1., 5., 4., 3., 2.],
[4., 3., 2., 1., 5.]]),
'average': array([[4.5, 3., 2., 1., 4.5],
[3.5, 2., 1., 5., 3.5],
[2.5, 1., 5., 4., 2.5],
[1.5, 5., 4., 3., 1.5],
[4.5, 3., 2., 1., 4.5]]),
'min': array([[4., 3., 2., 1., 4.],
[3., 2., 1., 5., 3.],
[2., 1., 5., 4., 2.],
[1., 5., 4., 3., 1.],
[4., 3., 2., 1., 4.]]),
'max': array([[5., 3., 2., 1., 5.],
[4., 2., 1., 5., 4.],
[3., 1., 5., 4., 3.],
[2., 5., 4., 3., 2.],
[5., 3., 2., 1., 5.]]),
'dense': array([[4., 3., 2., 1., 4.],
[3., 2., 1., 4., 3.],
[2., 1., 4., 3., 2.],
[1., 4., 3., 2., 1.],
[4., 3., 2., 1., 4.]]),
}
def check(terms):
graph = TermGraph(terms)
results = self.run_graph(
graph,
initial_workspace={self.f: data},
mask=self.build_mask(ones((5, 5))),
)
for method in terms:
check_arrays(results[method], expected_ranks[method])
check({
meth: self.f.rank(method=meth, ascending=False)
for meth in expected_ranks
})
# Not passing a method should default to ordinal.
check({'ordinal': self.f.rank(ascending=False)})
def test_rank_after_mask(self):
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=float)
mask_data = ~eye(5, dtype=bool)
initial_workspace = {self.f: data, Mask(): mask_data}
graph = TermGraph(
{
"ascending_nomask": self.f.rank(ascending=True),
"ascending_mask": self.f.rank(ascending=True, mask=Mask()),
"descending_nomask": self.f.rank(ascending=False),
"descending_mask": self.f.rank(ascending=False, mask=Mask()),
}
)
expected = {
"ascending_nomask": array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
"descending_nomask": array([[4., 3., 2., 1., 5.],
[3., 2., 1., 5., 4.],
[2., 1., 5., 4., 3.],
[1., 5., 4., 3., 2.],
[4., 3., 2., 1., 5.]]),
# Diagonal should be all nans, and anything whose rank was less
# than the diagonal in the unmasked calc should go down by 1.
"ascending_mask": array([[nan, 2., 3., 4., 1.],
[2., nan, 4., 1., 3.],
[2., 4., nan, 1., 3.],
[3., 1., 2., nan, 4.],
[1., 2., 3., 4., nan]]),
"descending_mask": array([[nan, 3., 2., 1., 4.],
[2., nan, 1., 4., 3.],
[2., 1., nan, 4., 3.],
[1., 4., 3., nan, 2.],
[4., 3., 2., 1., nan]]),
}
results = self.run_graph(
graph,
initial_workspace,
mask=self.build_mask(ones((5, 5))),
)
for method in results:
check_arrays(expected[method], results[method])
@parameterized.expand([
# Test cases computed by doing:
# from numpy.random import seed, randn
# from talib import RSI
# seed(seed_value)
# data = abs(randn(15, 3))
# expected = [RSI(data[:, i])[-1] for i in range(3)]
(100, array([41.032913785966, 51.553585468393, 51.022005016446])),
(101, array([43.506969935466, 46.145367530182, 50.57407044197])),
(102, array([46.610102205934, 47.646892444315, 52.13182788538])),
])
def test_rsi(self, seed_value, expected):
rsi = RSI()
today = datetime64(1, 'ns')
assets = arange(3)
out = empty((3,), dtype=float)
seed(seed_value) # Seed so we get deterministic results.
test_data = abs(randn(15, 3))
out = empty((3,), dtype=float)
rsi.compute(today, assets, out, test_data)
check_allclose(expected, out)
```
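The hard-coded expectations in the ranking tests follow the usual SciPy tie-breaking conventions, so each row can be sanity-checked independently with `scipy.stats.rankdata` (used here only for verification, not as a claim about zipline's implementation):
```python
import numpy as np
from scipy.stats import rankdata

row = np.array([0., 1., 2., 3., 0.])  # first row of the test data
for method in ('ordinal', 'average', 'min', 'max', 'dense'):
    print(method, rankdata(row, method=method))
# ordinal -> 1, 3, 4, 5, 2     average -> 1.5, 3, 4, 5, 1.5
# min     -> 1, 3, 4, 5, 1     max     -> 2, 3, 4, 5, 2
# dense   -> 1, 2, 3, 4, 1
```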
#### File: zipline/assets/asset_writer.py
```python
from abc import (
ABCMeta,
abstractmethod,
)
from collections import namedtuple
import re
import pandas as pd
import numpy as np
from six import with_metaclass
import sqlalchemy as sa
from zipline.errors import SidAssignmentError
from zipline.assets._assets import Asset
SQLITE_MAX_VARIABLE_NUMBER = 999
# Define a namedtuple for use with the load_data and _load_data methods
AssetData = namedtuple('AssetData', 'equities futures exchanges root_symbols')
# Default values for the equities DataFrame
_equities_defaults = {
'symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'exchange': None,
}
# Default values for the futures DataFrame
_futures_defaults = {
'symbol': None,
'root_symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'exchange': None,
'notice_date': None,
'expiration_date': None,
'auto_close_date': None,
'contract_multiplier': 1,
}
# Default values for the exchanges DataFrame
_exchanges_defaults = {
'timezone': None,
}
# Default values for the root_symbols DataFrame
_root_symbols_defaults = {
'root_symbol_id': None,
'sector': None,
'description': None,
'exchange': None,
}
# Fuzzy symbol delimiters that may break up a company symbol and share class
_delimited_symbol_delimiter_regex = r'[./\-_]'
_delimited_symbol_default_triggers = frozenset({np.nan, None, ''})
def split_delimited_symbol(symbol):
"""
    Takes in a symbol that may be delimited and splits it into a company
symbol and share class symbol. Also returns the fuzzy symbol, which is the
symbol without any fuzzy characters at all.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
    (str, str, str)
        A tuple of (company_symbol, share_class_symbol, fuzzy_symbol)
"""
# return blank strings for any bad fuzzy symbols, like NaN or None
if symbol in _delimited_symbol_default_triggers:
return ('', '', '')
split_list = re.split(pattern=_delimited_symbol_delimiter_regex,
string=symbol,
maxsplit=1)
    # Break the list up into its two components, the company symbol and the
# share class symbol
company_symbol = split_list[0]
if len(split_list) > 1:
share_class_symbol = split_list[1]
else:
share_class_symbol = ''
# Strip all fuzzy characters from the symbol to get the fuzzy symbol
fuzzy_symbol = re.sub(pattern=_delimited_symbol_delimiter_regex,
repl='',
string=symbol)
return (company_symbol, share_class_symbol, fuzzy_symbol)
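# Illustrative inputs/outputs for split_delimited_symbol:
#   split_delimited_symbol('BRK.A')  -> ('BRK', 'A', 'BRKA')
#   split_delimited_symbol('AAPL')   -> ('AAPL', '', 'AAPL')
#   split_delimited_symbol(None)     -> ('', '', '')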
def _generate_output_dataframe(data_subset, defaults):
"""
    Generates an output dataframe from the given subset of user-provided
    data and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are the default values to insert in the
DataFrame if no user data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
need = desired_cols - cols
    # Combine the user-supplied data with our required columns.
output = pd.concat(
(data_subset, pd.DataFrame(
{k: defaults[k] for k in need},
data_subset.index,
)),
axis=1,
copy=False
)
return output
class AssetDBWriter(with_metaclass(ABCMeta)):
"""
Class used to write arbitrary data to SQLite database.
    Concrete subclasses will implement the logic for a specific
    input datatype by implementing the _load_data method.
Methods
-------
    write_all(engine, allow_sid_assignment=True)
Write the data supplied at initialization to the database.
    init_db(engine)
Create the SQLite tables (called by write_all).
load_data()
Returns data in standard format.
"""
CHUNK_SIZE = SQLITE_MAX_VARIABLE_NUMBER
def __init__(self, equities=None, futures=None, exchanges=None,
root_symbols=None):
if equities is None:
equities = self.defaultval()
self._equities = equities
if futures is None:
futures = self.defaultval()
self._futures = futures
if exchanges is None:
exchanges = self.defaultval()
self._exchanges = exchanges
if root_symbols is None:
root_symbols = self.defaultval()
self._root_symbols = root_symbols
@abstractmethod
def defaultval(self):
raise NotImplementedError
def write_all(self,
engine,
allow_sid_assignment=True):
""" Write pre-supplied data to SQLite.
Parameters
----------
engine : Engine
An SQLAlchemy engine to a SQL database.
allow_sid_assignment: bool, optional
If True then the class can assign sids where necessary.
"""
self.allow_sid_assignment = allow_sid_assignment
# Begin an SQL transaction.
with engine.begin() as txn:
# Create SQL tables.
self.init_db(txn)
# Get the data to add to SQL.
data = self.load_data()
# Write the data to SQL.
self._write_exchanges(data.exchanges, txn)
self._write_root_symbols(data.root_symbols, txn)
self._write_futures(data.futures, txn)
self._write_equities(data.equities, txn)
def _write_df_to_table(self, df, tbl, bind):
df.to_sql(
tbl.name,
bind.connection,
index_label=[col.name for col in tbl.primary_key.columns][0],
if_exists='append',
chunksize=self.CHUNK_SIZE,
)
def _write_assets(self, assets, asset_tbl, asset_type, bind):
self._write_df_to_table(assets, asset_tbl, bind)
pd.DataFrame({self.asset_router.c.sid.name: assets.index.values,
self.asset_router.c.asset_type.name: asset_type}).to_sql(
self.asset_router.name,
bind.connection,
if_exists='append',
index=False,
chunksize=self.CHUNK_SIZE,
)
def _write_exchanges(self, exchanges, bind):
self._write_df_to_table(exchanges, self.futures_exchanges, bind)
def _write_root_symbols(self, root_symbols, bind):
self._write_df_to_table(root_symbols, self.futures_root_symbols, bind)
def _write_futures(self, futures, bind):
self._write_assets(futures, self.futures_contracts, 'future', bind)
def _write_equities(self, equities, bind):
self._write_assets(equities, self.equities, 'equity', bind)
def init_db(self, engine):
"""Connect to database and create tables.
Parameters
----------
engine : Engine
An engine to a SQL database.
"""
metadata = sa.MetaData(bind=engine)
self.equities = sa.Table(
'equities',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text, index=True),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text, index=True),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column('exchange', sa.Text),
)
self.futures_exchanges = sa.Table(
'futures_exchanges',
metadata,
sa.Column(
'exchange',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('timezone', sa.Text),
)
self.futures_root_symbols = sa.Table(
'futures_root_symbols',
metadata,
sa.Column(
'root_symbol',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('root_symbol_id', sa.Integer),
sa.Column('sector', sa.Text),
sa.Column('description', sa.Text),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey(self.futures_exchanges.c.exchange),
),
)
self.futures_contracts = sa.Table(
'futures_contracts',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text, unique=True, index=True),
sa.Column(
'root_symbol',
sa.Text,
sa.ForeignKey(self.futures_root_symbols.c.root_symbol),
index=True
),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey(self.futures_exchanges.c.exchange),
),
sa.Column('notice_date', sa.Integer, nullable=False),
sa.Column('expiration_date', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer, nullable=False),
sa.Column('contract_multiplier', sa.Float),
)
self.asset_router = sa.Table(
'asset_router',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True),
sa.Column('asset_type', sa.Text),
)
# Create the SQL tables if they do not already exist.
metadata.create_all(checkfirst=True)
return metadata
def load_data(self):
"""
Returns a standard set of pandas.DataFrames:
equities, futures, exchanges, root_symbols
"""
data = self._load_data()
###############################
# Generate equities DataFrame #
###############################
# HACK: If company_name is provided, map it to asset_name
if ('company_name' in data.equities.columns
and 'asset_name' not in data.equities.columns):
data.equities['asset_name'] = data.equities['company_name']
if 'file_name' in data.equities.columns:
data.equities['symbol'] = data.equities['file_name']
equities_output = _generate_output_dataframe(
data_subset=data.equities,
defaults=_equities_defaults,
)
# Split symbols to company_symbols and share_class_symbols
tuple_series = equities_output['symbol'].apply(split_delimited_symbol)
split_symbols = pd.DataFrame(
tuple_series.tolist(),
columns=['company_symbol', 'share_class_symbol', 'fuzzy_symbol'],
index=tuple_series.index
)
equities_output = equities_output.join(split_symbols)
# Upper-case all symbol data
equities_output['symbol'] = \
equities_output.symbol.str.upper()
equities_output['company_symbol'] = \
equities_output.company_symbol.str.upper()
equities_output['share_class_symbol'] = \
equities_output.share_class_symbol.str.upper()
equities_output['fuzzy_symbol'] = \
equities_output.fuzzy_symbol.str.upper()
# Convert date columns to UNIX Epoch integers (nanoseconds)
for date_col in ('start_date', 'end_date', 'first_traded'):
equities_output[date_col] = \
self.dt_to_epoch_ns(equities_output[date_col])
##############################
# Generate futures DataFrame #
##############################
futures_output = _generate_output_dataframe(
data_subset=data.futures,
defaults=_futures_defaults,
)
# Convert date columns to UNIX Epoch integers (nanoseconds)
for date_col in ('start_date', 'end_date', 'first_traded',
'notice_date', 'expiration_date', 'auto_close_date'):
futures_output[date_col] = \
self.dt_to_epoch_ns(futures_output[date_col])
# Convert symbols and root_symbols to upper case.
futures_output['symbol'] = futures_output.symbol.str.upper()
futures_output['root_symbol'] = futures_output.root_symbol.str.upper()
################################
# Generate exchanges DataFrame #
################################
exchanges_output = _generate_output_dataframe(
data_subset=data.exchanges,
defaults=_exchanges_defaults,
)
###################################
# Generate root symbols DataFrame #
###################################
root_symbols_output = _generate_output_dataframe(
data_subset=data.root_symbols,
defaults=_root_symbols_defaults,
)
return AssetData(equities=equities_output,
futures=futures_output,
exchanges=exchanges_output,
root_symbols=root_symbols_output)
@staticmethod
def dt_to_epoch_ns(dt_series):
index = pd.to_datetime(dt_series.values)
try:
index = index.tz_localize('UTC')
except TypeError:
index = index.tz_convert('UTC')
return index.view(np.int64)
@abstractmethod
def _load_data(self):
"""
Subclasses should implement this method to return data in a standard
format: a pandas.DataFrame for each of the following tables:
equities, futures, exchanges, root_symbols.
For each of these DataFrames the index columns should be the integer
unique identifier for the table, which are sid, sid, exchange_id and
root_symbol_id respectively.
"""
raise NotImplementedError('load_data')
class AssetDBWriterFromList(AssetDBWriter):
"""
Class used to write list data to SQLite database.
"""
defaultval = list
def _load_data(self):
# 0) Instantiate empty dictionaries
_equities, _futures, _exchanges, _root_symbols = {}, {}, {}, {}
# 1) Populate dictionaries
# Return the largest sid in our database, if one exists.
id_counter = sa.select(
[sa.func.max(self.asset_router.c.sid)]
).execute().scalar()
# Base sid creation on largest sid in database, or 0 if
# no sids exist.
if id_counter is None:
id_counter = 0
else:
id_counter += 1
for output, data in [(_equities, self._equities),
(_futures, self._futures), ]:
for identifier in data:
if isinstance(identifier, Asset):
sid = identifier.sid
metadata = identifier.to_dict()
output[sid] = metadata
elif hasattr(identifier, '__int__'):
output[identifier.__int__()] = {'symbol': None}
else:
if self.allow_sid_assignment:
output[id_counter] = {'symbol': identifier}
id_counter += 1
else:
raise SidAssignmentError(identifier=identifier)
exchange_counter = 0
for identifier in self._exchanges:
if hasattr(identifier, '__int__'):
_exchanges[identifier.__int__()] = {}
else:
_exchanges[exchange_counter] = {'exchange': identifier}
exchange_counter += 1
root_symbol_counter = 0
for identifier in self._root_symbols:
if hasattr(identifier, '__int__'):
_root_symbols[identifier.__int__()] = {}
else:
_root_symbols[root_symbol_counter] = \
{'root_symbol': identifier}
root_symbol_counter += 1
# 2) Convert dictionaries to pandas.DataFrames.
_equities = pd.DataFrame.from_dict(_equities, orient='index')
_futures = pd.DataFrame.from_dict(_futures, orient='index')
_exchanges = pd.DataFrame.from_dict(_exchanges, orient='index')
_root_symbols = pd.DataFrame.from_dict(_root_symbols, orient='index')
# 3) Return the data inside a named tuple.
return AssetData(equities=_equities,
futures=_futures,
exchanges=_exchanges,
root_symbols=_root_symbols)
class AssetDBWriterFromDictionary(AssetDBWriter):
"""
Class used to write dictionary data to SQLite database.
Expects to be initialised with dictionaries in the following format:
{id_0: {attribute_1 : ...}, id_1: {attribute_2: ...}, ...}
"""
defaultval = dict
def _load_data(self):
_equities = pd.DataFrame.from_dict(self._equities, orient='index')
_futures = pd.DataFrame.from_dict(self._futures, orient='index')
_exchanges = pd.DataFrame.from_dict(self._exchanges, orient='index')
_root_symbols = pd.DataFrame.from_dict(self._root_symbols,
orient='index')
return AssetData(equities=_equities,
futures=_futures,
exchanges=_exchanges,
root_symbols=_root_symbols)
class AssetDBWriterFromDataFrame(AssetDBWriter):
"""
Class used to write pandas.DataFrame data to SQLite database.
"""
defaultval = pd.DataFrame
def _load_data(self):
# Check whether identifier columns have been provided.
# If they have, set the index to this column.
        # If not, assume the index already contains the identifier information.
for df, id_col in [
(self._equities, 'sid'),
(self._futures, 'sid'),
(self._exchanges, 'exchange'),
(self._root_symbols, 'root_symbol'),
]:
if id_col in df.columns:
df.set_index([id_col], inplace=True)
return AssetData(equities=self._equities,
futures=self._futures,
exchanges=self._exchanges,
root_symbols=self._root_symbols)
``` |
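A sketch of exercising `AssetDBWriterFromDataFrame` without touching a database: `load_data()` alone shows the symbol splitting, default filling, and epoch-nanosecond date conversion performed above, and `write_all(engine)` with a SQLAlchemy engine would then persist the resulting frames. The sample equities below are made up, and this assumes a zipline installation contemporary with this fork:
```python
import pandas as pd
from zipline.assets.asset_writer import AssetDBWriterFromDataFrame

equities = pd.DataFrame(
    {
        'symbol': ['AAPL', 'BRK.A'],
        'asset_name': ['Apple Inc.', 'Berkshire Hathaway class A'],
        'start_date': pd.Timestamp('2002-01-02'),
        'end_date': pd.Timestamp('2015-12-31'),
        'exchange': 'NYSE',
    },
    index=pd.Index([1, 2], name='sid'),
)

writer = AssetDBWriterFromDataFrame(equities=equities)
data = writer.load_data()
print(data.equities[['symbol', 'company_symbol', 'share_class_symbol', 'fuzzy_symbol']])
print(data.equities[['start_date', 'end_date']])  # stored as UNIX epoch nanoseconds
```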
{
"source": "jimgreen/Viscid",
"score": 2
} |
#### File: Viscid/doc/public_doccheck.py
```python
from __future__ import print_function
import os
import sys
import types # from types import BufferType, ModuleType
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")))
import viscid
# from viscid.plot import vpyplot as vlt
# from viscid.plot import vlab
def main():
doc_fname = os.path.join(os.path.dirname(sys.argv[0]), "functions.rst")
with open(doc_fname, 'r') as fin:
doc = fin.read()
without_sphinx = []
without_docstr = []
sphinx_instance_blklst = []
sphinx_module_blklst = []
docstr_instance_blklst = []
docstr_module_blklst = []
for attr_name in dir(viscid):
if attr_name.startswith("_"):
continue
attr = getattr(viscid, attr_name)
if callable(attr):
if not hasattr(attr, "__name__"):
setattr(attr, "__name__", attr_name)
if "`viscid.{0}`".format(attr_name) not in doc:
if any(isinstance(attr, t) for t in sphinx_instance_blklst):
pass
elif any(m in attr.__module__ for m in sphinx_module_blklst):
pass
else:
without_sphinx.append(attr)
if not attr.__doc__:
if any(isinstance(attr, t) for t in docstr_instance_blklst):
pass
elif any(m in attr.__module__ for m in docstr_module_blklst):
pass
else:
without_docstr.append(attr)
N = 62
# this is annoying, and it's clearly not motivating me to update the docs
without_sphinx = [] # !!!!!!!!!!!!!!!!!
if without_docstr or without_sphinx:
print("*" * N, file=sys.stderr)
print(" documentation issues... ", file=sys.stderr)
else:
print("*" * N, file=sys.stderr)
print(" all public functions are doumented ", file=sys.stderr)
print("*" * N, file=sys.stderr)
if without_docstr:
err_str = "*" * N + "\n"
err_str += "The following public functions are missing docstrings\n"
err_str += "-" * N
for fn in without_docstr:
err_str += "\n - {0}.{1}".format(fn.__module__, fn.__name__)
print(err_str, file=sys.stderr)
if without_sphinx:
err_str = "*" * N + "\n"
err_str += "The following public functions are not present in\n"
err_str += "`{0}`:\n".format(doc_fname)
err_str += "-" * N
for fn in without_sphinx:
err_str += "\n - viscid.{0}".format(fn.__name__)
print(err_str, file=sys.stderr)
if without_docstr or without_sphinx:
print("*" * N, file=sys.stderr)
if without_docstr and without_sphinx:
ret = 4
elif without_docstr:
ret = 1
elif without_sphinx:
        ret = 0 # temporarily allow functions that are not in functions.rst
else:
ret = 0
return ret
if __name__ == "__main__":
sys.exit(main())
##
## EOF
##
```
#### File: Viscid/tests/misc_ecfc_slice_save.py
```python
from __future__ import division, print_function
import os
import sys
import viscid
from viscid.plot import vpyplot as vlt
def main():
f = viscid.load_file("~/dev/work/tmedium/*.3d.[-1].xdmf")
grid = f.get_grid()
gslc = "x=-26f:12.5f, y=-15f:15f, z=-15f:15f"
# gslc = "x=-12.5f:26f, y=-15f:15f, z=-15f:15f"
b_cc = f['b_cc'][gslc]
b_cc.name = "b_cc"
b_fc = f['b_fc'][gslc]
b_fc.name = "b_fc"
e_cc = f['e_cc'][gslc]
e_cc.name = "e_cc"
e_ec = f['e_ec'][gslc]
e_ec.name = "e_ec"
pp = f['pp'][gslc]
pp.name = 'pp'
pargs = dict(logscale=True, earth=True)
# vlt.clf()
# ax1 = vlt.subplot(211)
# vlt.plot(f['pp']['y=0f'], **pargs)
# # vlt.plot(viscid.magnitude(f['b_cc']['y=0f']), **pargs)
# # vlt.show()
# vlt.subplot(212, sharex=ax1, sharey=ax1)
# vlt.plot(viscid.magnitude(viscid.fc2cc(f['b_fc'])['y=0f']), **pargs)
# vlt.show()
basename = './tmediumR.3d.{0:06d}'.format(int(grid.time))
viscid.save_fields(basename + '.h5', [b_cc, b_fc, e_cc, e_ec, pp])
f2 = viscid.load_file(basename + ".xdmf")
pargs = dict(logscale=True, earth=True)
vlt.clf()
ax1 = vlt.subplot(211)
vlt.plot(f2['pp']['y=0f'], style='contour', levels=5, colorbar=None,
colors='k', **pargs)
vlt.plot(viscid.magnitude(f2['b_cc']['y=0f']), **pargs)
vlt.subplot(212, sharex=ax1, sharey=ax1)
vlt.plot(viscid.magnitude(viscid.fc2cc(f2['b_fc'])['y=0f']), **pargs)
vlt.show()
os.remove(basename + '.h5')
os.remove(basename + '.xdmf')
return 0
if __name__ == "__main__":
sys.exit(main())
##
## EOF
##
```
#### File: Viscid/tests/test_ascii.py
```python
from __future__ import print_function
import argparse
import sys
import os
import matplotlib.pyplot as plt
from viscid_test_common import next_plot_fname
import viscid
from viscid import vutil
from viscid.plot import vpyplot as vlt
def _main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--show", "--plot", action="store_true")
args = vutil.common_argparse(parser)
f = viscid.load_file(os.path.join(viscid.sample_dir, "test.asc"))
vlt.plot(f['c1'], show=False)
plt.savefig(next_plot_fname(__file__))
if args.show:
vlt.show()
return 0
if __name__ == "__main__":
sys.exit(_main())
##
## EOF
##
```
#### File: Viscid/tests/test_div.py
```python
from __future__ import print_function
import argparse
import sys
from timeit import default_timer as time
import matplotlib.pyplot as plt
import numpy as np
from viscid_test_common import next_plot_fname
import viscid
from viscid import logger
from viscid import vutil
from viscid.plot import vpyplot as vlt
try:
import numexpr as ne
HAS_NUMEXPR = True
except ImportError:
HAS_NUMEXPR = False
def run_div_test(fld, exact, title='', show=False, ignore_inexact=False):
t0 = time()
result_numexpr = viscid.div(fld, preferred="numexpr", only=False)
t1 = time()
logger.info("numexpr magnitude runtime: %g", t1 - t0)
result_diff = viscid.diff(result_numexpr, exact)['x=1:-1, y=1:-1, z=1:-1']
if not ignore_inexact and not (result_diff.data < 5e-5).all():
logger.warn("numexpr result is far from the exact result")
logger.info("min/max(abs(numexpr - exact)): %g / %g",
np.min(result_diff.data), np.max(result_diff.data))
planes = ["y=0f", "z=0f"]
nrows = 2
ncols = len(planes)
ax = plt.subplot2grid((nrows, ncols), (0, 0))
ax.axis("equal")
for i, p in enumerate(planes):
plt.subplot2grid((nrows, ncols), (0, i), sharex=ax, sharey=ax)
vlt.plot(result_numexpr, p, show=False)
plt.subplot2grid((nrows, ncols), (1, i), sharex=ax, sharey=ax)
vlt.plot(result_diff, p, show=False)
plt.suptitle(title)
vlt.auto_adjust_subplots(subplot_params=dict(top=0.9))
plt.savefig(next_plot_fname(__file__))
if show:
vlt.mplshow()
def _main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--prof", action="store_true")
parser.add_argument("--show", "--plot", action="store_true")
args = vutil.common_argparse(parser)
dtype = 'float64'
# use 512 512 256 to inspect memory related things
x = np.array(np.linspace(-0.5, 0.5, 256), dtype=dtype)
y = np.array(np.linspace(-0.5, 0.5, 256), dtype=dtype)
z = np.array(np.linspace(-0.5, 0.5, 64), dtype=dtype)
v = viscid.empty([x, y, z], name="V", nr_comps=3, center="cell",
layout="interlaced")
exact_cc = viscid.empty([x, y, z], name="exact_cc", center='cell')
Xcc, Ycc, Zcc = exact_cc.get_crds_cc(shaped=True) # pylint: disable=W0612
if HAS_NUMEXPR:
v['x'] = ne.evaluate("(sin(Xcc))") # + Zcc
v['y'] = ne.evaluate("(cos(Ycc))") # + Xcc# + Zcc
v['z'] = ne.evaluate("-((sin(Zcc)))") # + Xcc# + Ycc
exact_cc[:, :, :] = ne.evaluate("cos(Xcc) - sin(Ycc) - cos(Zcc)")
else:
v['x'] = (np.sin(Xcc)) # + Zcc
v['y'] = (np.cos(Ycc)) # + Xcc# + Zcc
v['z'] = -((np.sin(Zcc))) # + Xcc# + Ycc
exact_cc[:, :, :] = np.cos(Xcc) - np.sin(Ycc) - np.cos(Zcc)
if args.prof:
print("Without boundaries")
viscid.timeit(viscid.div, v, bnd=False, timeit_repeat=10,
timeit_print_stats=True)
print("With boundaries")
viscid.timeit(viscid.div, v, bnd=True, timeit_repeat=10,
timeit_print_stats=True)
logger.info("node centered tests")
v_nc = v.as_centered('node')
exact_nc = viscid.empty_like(v_nc['x'])
X, Y, Z = exact_nc.get_crds_nc(shaped=True) # pylint: disable=W0612
if HAS_NUMEXPR:
exact_nc[:, :, :] = ne.evaluate("cos(X) - sin(Y) - cos(Z)")
else:
exact_nc[:, :, :] = np.cos(X) - np.sin(Y) - np.cos(Z)
# FIXME: why is the error so much larger here?
run_div_test(v_nc, exact_nc, title='Node Centered', show=args.show,
ignore_inexact=True)
logger.info("cell centered tests")
v_cc = v_nc.as_centered('cell')
run_div_test(v_cc, exact_cc, title="Cell Centered", show=args.show)
return 0
if __name__ == "__main__":
sys.exit(_main())
##
## EOF
##
```
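The `exact_cc`/`exact_nc` arrays above are simply the analytic divergence of the test field: for v = (sin(x), cos(y), -sin(z)), div v = cos(x) - sin(y) - cos(z). A quick symbolic check of that identity (SymPy is not used by the test itself; it is pulled in here only for verification):
```python
import sympy as sp

x, y, z = sp.symbols('x y z')
vx, vy, vz = sp.sin(x), sp.cos(y), -sp.sin(z)
div_v = sp.diff(vx, x) + sp.diff(vy, y) + sp.diff(vz, z)
print(sp.simplify(div_v - (sp.cos(x) - sp.sin(y) - sp.cos(z))))  # 0
```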
#### File: Viscid/viscid/amr_field.py
```python
from __future__ import print_function
import numpy as np
import viscid
# from viscid.compat import string_types
from viscid.field import Field
try:
from viscid.calculator import cycalc
_HAS_CYCALC = True
except ImportError:
# in case cycalc isn't built
_HAS_CYCALC = False
__all__ = ["is_list_of_fields"]
def is_list_of_fields(lst):
"""is a sequence a sequence of Field objects?"""
for item in lst:
if not isinstance(item, Field):
return False
return True
class _FieldListCallableAttrWrapper(object):
objs = None
attrname = None
post_func = None
def __init__(self, objs, attrname, post_func=None):
# print(">>> runtime wrapping:", attrname)
for o in objs:
if not hasattr(o, attrname):
raise AttributeError("{0} has no attribute {1}"
"".format(o, attrname))
self.objs = objs
self.attrname = attrname
self.post_func = post_func
def __call__(self, *args, **kwargs):
lst = [getattr(o, self.attrname)(*args, **kwargs) for o in self.objs]
if self.post_func:
return self.post_func(lst)
else:
return lst
class AMRField(object):
"""Field-like
    Contains an AMRSkeleton and a list of Fields. This mimics a Field,
but it is NOT a subclass of Field. Many methods of Field are
wrapped and return a new AMRField.
If an attribute of Field is not explicitly wrapped, this class will
try to runtime-wrap that method and return a new AMRField or a list
containing the result. This will not work for special methods since
python will not send those through __getattr__ or __getattribute__.
"""
_TYPE = "amr"
skeleton = None
patches = None
nr_patches = None
def __init__(self, fields, skeleton):
if not is_list_of_fields(fields):
raise TypeError("AMRField can only contain Fields:", fields)
self.skeleton = skeleton
self.patches = fields
self.nr_patches = len(fields)
@property
def xl(self):
return np.min(self.skeleton.xl, axis=0)
@property
def xh(self):
return np.max(self.skeleton.xh, axis=0)
def get_slice_extent(self, selection):
extent = self.patches[0]._src_crds.get_slice_extent(selection)
for i in range(3):
if np.isnan(extent[0, i]):
extent[0, i] = self.xl[i]
if np.isnan(extent[1, i]):
extent[1, i] = self.xh[i]
return extent
###########
## slicing
def _prepare_amr_slice(self, selection):
""" return list of patches that contain selection """
        # FIXME: it's not good to reach into src_field[0]'s private methods
# like this, but it's also not good to implement these things twice
# print("??", len(self.patches))
if len(self.patches) == 0:
raise ValueError("AMR field must contain patches to be slicable")
selection, _ = self.patches[0]._prepare_slice(selection)
extent = self.patches[0]._src_crds.get_slice_extent(selection)
inds = []
# these are patches that look like they contain selection
# but might not due to finite precision errors when
# calculating xh
maybe = []
for i, fld in enumerate(self.patches):
# - if xl - atol > the extent of the slice in any direction, then
# there's no overlap
# - if xh <= the lower corner of the slice in any direction, then
# there's no overlap
# the atol and equals are done to match cases where extent overlaps
# the lower corner, but not the upper corner
# logic goes this way cause extent has NaNs in
# dimensions that aren't specified in selection... super-kludge
# also, temporarily disable warnings on NaNs in numpy
invalid_err_level = np.geterr()['invalid']
np.seterr(invalid='ignore')
atol = 100 * np.finfo(fld.crds.xl_nc.dtype).eps
if (not np.any(np.logical_or(fld.crds.xl_nc - atol > extent[1],
fld.crds.xh_nc <= extent[0]))):
if np.any(np.isclose(fld.crds.xh_nc, extent[0], atol=atol)):
maybe.append(i)
else:
inds.append(i)
np.seterr(invalid=invalid_err_level)
# if we found some maybes, but no real hits, then use the maybes
if maybe and not inds:
inds = maybe
if len(inds) == 0:
viscid.logger.error("selection {0} not in any patch @ time {1}"
"".format(selection, self.patches[0].time))
if self.skeleton:
s = (" skeleton: xl= {0} xh = {1}"
"".format(self.skeleton.global_xl,
self.skeleton.global_xh))
viscid.logger.error(s)
inds = None
flds = None
elif len(inds) == 1:
inds = inds[0]
flds = self.patches[inds]
else:
flds = [self.patches[i] for i in inds]
return flds, inds
def _finalize_amr_slice(self, fld_lst): # pylint: disable=no-self-use
skeleton = None # FIXME
for fld in fld_lst:
if isinstance(fld, (int, float, np.number)):
m = ("Trying to make an AMRField where 1+ patches "
"is just a number... You probably slice_reduced "
"a field down to a scalar value")
viscid.logger.error(m)
return AMRField(fld_lst, skeleton)
def patch_indices(self, selection):
"""get the indices of the patches that overlap selection
Args:
selection (slice, str): anything that can slice a field
Returns:
list of indices
"""
_, inds = self._prepare_amr_slice(selection)
return inds
def slice(self, selection):
fld_lst, _ = self._prepare_amr_slice(selection)
if not isinstance(fld_lst, list):
return fld_lst.slice(selection)
fld_lst = [fld.slice(selection) for fld in fld_lst]
return self._finalize_amr_slice(fld_lst)
def slice_reduce(self, selection):
fld_lst, _ = self._prepare_amr_slice(selection)
if not isinstance(fld_lst, list):
return fld_lst.slice_reduce(selection)
fld_lst = [fld.slice_reduce(selection) for fld in fld_lst]
return self._finalize_amr_slice(fld_lst)
def slice_and_keep(self, selection):
fld_lst, _ = self._prepare_amr_slice(selection)
if not isinstance(fld_lst, list):
return fld_lst.slice_and_keep(selection)
fld_lst = [fld.slice_and_keep(selection) for fld in fld_lst]
return self._finalize_amr_slice(fld_lst)
def interpolated_slice(self, selection):
fld_lst, _ = self._prepare_amr_slice(selection)
if not isinstance(fld_lst, list):
raise RuntimeError("can't interpolate to that slice?")
ret_lst = [fld.interpolated_slice(selection) for fld in fld_lst]
return self._finalize_amr_slice(ret_lst)
###################
## special methods
def __getitem__(self, item):
return self.slice(item)
def __setitem__(self, key, value):
raise NotImplementedError()
def __delitem__(self, item):
raise NotImplementedError()
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
"""clear all caches"""
for blk in self.patches:
blk.clear_cache()
return None
def wrap_field_method(self, attrname, *args, **kwargs):
"""Wrap methods whose args are Fields and return a Field"""
# make sure all args have same number of patches as self
is_field = [None] * len(args)
for i, arg in enumerate(args):
try:
if arg.nr_patches != self.nr_patches and arg.nr_patches != 1:
raise ValueError("AMR fields in math operations must "
"have the same number of patches")
is_field[i] = True
except AttributeError:
is_field[i] = False
lst = [None] * self.nr_patches
other = [None] * len(args)
# FIXME: There must be a better way
for i, patch in enumerate(self.patches):
for j, arg in enumerate(args):
if is_field[j]:
try:
other[j] = arg.patches[i]
except IndexError:
other[j] = arg.patches[0]
else:
other[j] = arg
lst[i] = getattr(patch, attrname)(*other, **kwargs)
if np.asarray(lst[0]).size == 1:
# operation reduced to scalar
arr = np.array(lst)
return getattr(arr, attrname)(**kwargs)
else:
return AMRField(lst, self.skeleton)
# TODO: as of numpy 1.10, this will be called on ufuncs... this
# will help some of the FIXMEs in __array__
# def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
# pass
def __array__(self, *args, **kwargs):
# FIXME: This is heinously inefficient for large arrays because it
        # makes a copy of all the arrays... but I don't see
# a way around this because ufuncs expect a single array
# FIXME: adding a dimension to the arrays will break cases like
# np.sum(fld, axis=-1), cause that -1 will now be the patch
# dimension
patches = [patch.__array__(*args, **kwargs) for patch in self.patches]
for i, patch in enumerate(patches):
patches[i] = np.expand_dims(patch, 0)
# the vstack will copy all the arrays, this is what __numpy_ufunc__
# will be able to avoid
arr = np.vstack(patches)
# roll the patch dimension to the last dimension... this is for ufuncs
# that take an axis argument... this way axis will only be confused
# if it's negative, this is the main reason to use __numpy_ufunc__
# in the future
arr = np.rollaxis(arr, 0, len(arr.shape))
return arr
def __array_wrap__(self, arr, context=None): # pylint: disable=unused-argument
# print(">> __array_wrap__", arr.shape, context)
flds = []
for i in range(arr.shape[-1]):
patch_arr = arr[..., i]
fld = self.patches[i].__array_wrap__(patch_arr, context=context)
flds.append(fld)
return AMRField(flds, self.skeleton)
def __add__(self, other):
return self.wrap_field_method("__add__", other)
def __sub__(self, other):
return self.wrap_field_method("__sub__", other)
def __mul__(self, other):
return self.wrap_field_method("__mul__", other)
def __div__(self, other):
return self.wrap_field_method("__div__", other)
def __truediv__(self, other):
return self.wrap_field_method("__truediv__", other)
def __floordiv__(self, other):
return self.wrap_field_method("__floordiv__", other)
def __mod__(self, other):
return self.wrap_field_method("__mod__", other)
def __divmod__(self, other):
return self.wrap_field_method("__divmod__", other)
def __pow__(self, other):
return self.wrap_field_method("__pow__", other)
def __lshift__(self, other):
return self.wrap_field_method("__lshift__", other)
def __rshift__(self, other):
return self.wrap_field_method("__rshift__", other)
def __and__(self, other):
return self.wrap_field_method("__and__", other)
def __xor__(self, other):
return self.wrap_field_method("__xor__", other)
def __or__(self, other):
return self.wrap_field_method("__or__", other)
def __radd__(self, other):
return self.wrap_field_method("__radd__", other)
def __rsub__(self, other):
return self.wrap_field_method("__rsub__", other)
def __rmul__(self, other):
return self.wrap_field_method("__rmul__", other)
def __rdiv__(self, other):
return self.wrap_field_method("__rdiv__", other)
def __rtruediv__(self, other):
return self.wrap_field_method("__rtruediv__", other)
def __rfloordiv__(self, other):
return self.wrap_field_method("__rfloordiv__", other)
def __rmod__(self, other):
return self.wrap_field_method("__rmod__", other)
def __rdivmod__(self, other):
return self.wrap_field_method("__rdivmod__", other)
def __rpow__(self, other):
return self.wrap_field_method("__rpow__", other)
def __iadd__(self, other):
return self.wrap_field_method("__iadd__", other)
def __isub__(self, other):
return self.wrap_field_method("__isub__", other)
def __imul__(self, other):
return self.wrap_field_method("__imul__", other)
def __idiv__(self, other):
return self.wrap_field_method("__idiv__", other)
def __itruediv__(self, other):
return self.wrap_field_method("__itruediv__", other)
def __ifloordiv__(self, other):
return self.wrap_field_method("__ifloordiv__", other)
def __imod__(self, other):
return self.wrap_field_method("__imod__", other)
def __ipow__(self, other):
return self.wrap_field_method("__ipow__", other)
def __neg__(self):
return self.wrap_field_method("__neg__")
def __pos__(self):
return self.wrap_field_method("__pos__")
def __abs__(self):
return self.wrap_field_method("__abs__")
def __invert__(self):
return self.wrap_field_method("__invert__")
def __lt__(self, other):
return self.wrap_field_method("__lt__", other)
def __le__(self, other):
return self.wrap_field_method("__le__", other)
def __eq__(self, other):
return self.wrap_field_method("__eq__", other)
def __ne__(self, other):
return self.wrap_field_method("__ne__", other)
def __gt__(self, other):
return self.wrap_field_method("__gt__", other)
def __ge__(self, other):
return self.wrap_field_method("__ge__", other)
def any(self, **kwargs):
return self.wrap_field_method("any", **kwargs)
def all(self, **kwargs):
return self.wrap_field_method("all", **kwargs)
def argmax(self, **kwargs):
return self.wrap_field_method("argmax", **kwargs)
def argmin(self, **kwargs):
return self.wrap_field_method("argmin", **kwargs)
def argpartition(self, **kwargs):
return self.wrap_field_method("argpartition", **kwargs)
def argsort(self, **kwargs):
return self.wrap_field_method("argsort", **kwargs)
def cumprod(self, **kwargs):
return self.wrap_field_method("cumprod", **kwargs)
def cumsum(self, **kwargs):
return self.wrap_field_method("cumsum", **kwargs)
def max(self, **kwargs):
return self.wrap_field_method("max", **kwargs)
def mean(self, **kwargs):
return self.wrap_field_method("mean", **kwargs)
def min(self, **kwargs):
return self.wrap_field_method("min", **kwargs)
def partition(self, **kwargs):
return self.wrap_field_method("partition", **kwargs)
def prod(self, **kwargs):
return self.wrap_field_method("prod", **kwargs)
def std(self, **kwargs):
return self.wrap_field_method("std", **kwargs)
def sum(self, **kwargs):
return self.wrap_field_method("sum", **kwargs)
def __getattr__(self, name):
# define a callback to finalize
# print("!! getting attr::", name)
if callable(getattr(self.patches[0], name)):
def _wrap(lst):
try:
return AMRField(lst, self.skeleton)
except TypeError:
return lst
return _FieldListCallableAttrWrapper(self.patches, name, _wrap)
else:
# return [getattr(fld, name) for fld in self.patches]
ret0 = getattr(self.patches[0], name)
# Check that all patches have the same value. Maybe this should
# have a debugging flag attached to it since it will take time.
try:
all_same = all(getattr(blk, name) == ret0
for blk in self.patches[1:])
except ValueError:
all_same = all(np.all(getattr(blk, name) == ret0)
for blk in self.patches[1:])
if not all_same:
raise ValueError("different patches of the AMRField have "
"different values for attribute: {0}"
"".format(name))
return ret0
##
## EOF
##
```
#### File: viscid/calculator/mpause.py
```python
from __future__ import print_function, division
import os
import numpy as np
import viscid
__all__ = ["paraboloid", "paraboloid_normal", "fit_paraboloid",
"get_mp_info", "find_mp_edges"]
_dtf = 'f8'
_paraboloid_dt = np.dtype([('x0', _dtf), ('y0', _dtf), ('z0', _dtf),
('ax', _dtf), ('ay', _dtf), ('az', _dtf)])
def paraboloid(y, z, x0, y0, z0, ax, ay, az):
"""Generic paraboloid function"""
return ax * (((y - y0) / ay)**2 + ((z - z0) / az)**2) + x0
def paraboloid_normal(y, z, x0, y0, z0, ax, ay, az, normalize=True): # pylint: disable=unused-argument
"""Normal vector of a generic paraboloid"""
dyF = 2.0 * (y - y0) / ay**2
dzF = 2.0 * (z - z0) / az**2
dxF = (-1.0 / ax) * np.ones_like(dyF)
normal = np.array([dxF, dyF, dzF])
if normalize:
normal = normal / np.linalg.norm(normal, axis=0)
return normal
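# Minimal usage sketch for the two helpers above (all numbers are illustrative):
def _example_paraboloid_eval():
    """Evaluate the paraboloid surface and its normals on a small y-z grid."""
    yy, zz = np.meshgrid(np.linspace(-5.0, 5.0, 16), np.linspace(-5.0, 5.0, 16))
    x = paraboloid(yy, zz, 9.0, 0.0, 0.0, 1.0, -1.0, -1.0)
    n = paraboloid_normal(yy, zz, 9.0, 0.0, 0.0, 1.0, -1.0, -1.0)
    return x, n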
def fit_paraboloid(fld, p0=(9.0, 0.0, 0.0, 1.0, -1.0, -1.0), tolerance=0.0):
"""Fit paraboloid it GSE coordinates x ~ y**2 + z**2
Args:
fld (:py:class:`viscid.field.ScalarField`): field of x values
        p0 (sequence): initial guess for paraboloid
(x0, y0, z0, ax, ay, az), where (x0, y0, z0) is the nose
location (should be subsolar for 0 dipole tilt), and the
            ax, ay, and az coefficients determine the curvature
Returns:
numpy.recarray: record array of parameters with length 2; the
1st value is the fit value, and the 2nd is one sigma of
the fit
"""
from scipy.optimize import curve_fit
def paraboloid_yz(yz, x0, y0, z0, ax, ay, az):
return paraboloid(yz[0], yz[1], x0, y0, z0, ax, ay, az)
Y, Z = fld.meshgrid_flat(prune=True)
popt, pcov = curve_fit(paraboloid_yz, np.vstack((Y, Z)),
fld.data.reshape(-1), p0=p0)
perr = np.sqrt(np.diag(pcov))
parab = np.recarray([2], dtype=_paraboloid_dt)
parab[:] = [popt, perr]
if tolerance:
for n in parab.dtype.names:
if n != "ax" and np.abs(parab[1][n] / parab[0][n]) > tolerance:
viscid.logger.warn("paraboloid parameter {0} didn't converge to "
"within {1:g}%\n{0} = {2:g} +/- {3:g}"
"".format(n, 100 * tolerance, parab[0][n],
parab[1][n]))
return parab
def get_mp_info(pp, b, j, e, cache=True, cache_dir=None,
slc="x=5.5f:11.0f, y=-4.0f:4.0f, z=-3.6f:3.6f",
fit="mp_xloc", fit_p0=(9.0, 0.0, 0.0, 1.0, -1.0, -1.0)):
"""Get info about m-pause as flattened fields
Notes:
The first thing this function does is mask locations where
the GSE-y current density < 1e-4. This masks out the bow
shock and current free regions. This works for southward IMF,
but it is not very general.
Parameters:
        pp (ScalarField): pressure
b (VectorField): magnetic field
j (VectorField): current density
e (VectorField, None): electric field (same centering as b). If
None, then the info that requires E will be filled with NaN
cache (bool, str): Save to and load from cache, if "force",
then don't load from cache if it exists, but do save a
cache at the end
cache_dir (str): Directory for cache, if None, same directory
as that file to which the grid belongs
slc (str): slice that gives a box that contains the m-pause
fit (str): to which resulting field should the paraboloid be fit,
defaults to mp_xloc, but pp_max_xloc might be useful in some
circumstances
fit_p0 (tuple): Initial guess vector for paraboloid fit
Returns:
        dict: Unless otherwise noted, the entries are 2D (y-z) fields
- **mp_xloc** location of minimum abs(Bz), this works
better than max of J^2 for FTEs
            - **mp_sheath_edge** location where Jy rises above
              0.1 * max(Jy) when coming in from the sheath side
            - **mp_sphere_edge** location where Jy rises above
              0.1 * max(Jy) when coming in from the sphere side
            - **mp_width** difference between the m-sheath edge and
              the m-sphere edge
- **mp_shear** magnetic shear taken 6 grid points into
the m-sheath / m-sphere
- **pp_max** max pp
- **pp_max_xloc** location of max pp
- **epar_max** max e parallel
- **epar_max_xloc** location of max e parallel
- **paraboloid** numpy.recarray of paraboloid fit. The
parameters are given in the 0th element, and
the 1st element contains the 1-sigma values for the fit
Raises:
RuntimeError: if using MHD crds instead of GSE crds
"""
if not cache_dir:
cache_dir = pp.find_info("_viscid_dirname", "./")
run_name = pp.find_info("run", None)
if cache and run_name:
t = pp.time
mp_fname = "{0}/{1}.mpause.{2:06.0f}".format(cache_dir, run_name, t)
else:
mp_fname = ""
try:
force = cache.strip().lower() == "force"
except AttributeError:
force = False
try:
if force or not mp_fname or not os.path.isfile(mp_fname + ".xdmf"):
raise IOError()
mp_info = {}
with viscid.load_file(mp_fname + ".xdmf") as dat:
fld_names = ["mp_xloc", "mp_sheath_edge", "mp_sphere_edge",
"mp_width", "mp_shear", "pp_max", "pp_max_xloc",
"epar_max", "epar_max_xloc"]
for fld_name in fld_names:
mp_info[fld_name] = dat[fld_name]["x=0"]
except (IOError, KeyError):
mp_info = {}
crd_system = viscid.as_crd_system(b, None)
if crd_system != 'gse':
raise RuntimeError("get_mp_info can't work in MHD crds, "
"switch to GSE please")
if j.nr_patches == 1:
pp_block = pp[slc]
b_block = b[slc]
j_block = j[slc]
if e is None:
e_block = np.nan * viscid.empty_like(j_block)
else:
e_block = e[slc]
else:
# interpolate an amr grid so we can proceed
obnd = pp.get_slice_extent(slc)
dx = np.min(pp.skeleton.L / pp.skeleton.n, axis=0)
nx = np.ceil((obnd[1] - obnd[0]) / dx)
vol = viscid.seed.Volume(obnd[0], obnd[1], nx, cache=True)
pp_block = vol.wrap_field(viscid.interp_trilin(pp, vol),
name="P").as_cell_centered()
b_block = vol.wrap_field(viscid.interp_trilin(b, vol),
name="B").as_cell_centered()
j_block = vol.wrap_field(viscid.interp_trilin(j, vol),
name="J").as_cell_centered()
if e is None:
e_block = np.nan * viscid.empty_like(j_block)
else:
e_block = vol.wrap_field(viscid.interp_trilin(e, vol),
name="E").as_cell_centered()
# jsq = viscid.dot(j_block, j_block)
bsq = viscid.dot(b_block, b_block)
# extract ndarrays and mask out bow shock / current free regions
maskval = 1e-4
jy_mask = j_block['y'].data < maskval
masked_bsq = 1.0 * bsq
masked_bsq.data = np.ma.masked_where(jy_mask, bsq)
xcc = j_block.get_crd_cc('x')
nx = len(xcc)
mp_xloc = np.argmin(masked_bsq, axis=0) # indices
mp_xloc = mp_xloc.wrap(xcc[mp_xloc.data]) # location
pp_max = np.max(pp_block, axis=0)
pp_max_xloc = np.argmax(pp_block, axis=0) # indices
pp_max_xloc = pp_max_xloc.wrap(xcc[pp_max_xloc.data]) # location
epar = viscid.project(e_block, b_block)
epar_max = np.max(epar, axis=0)
epar_max_xloc = np.argmax(epar, axis=0) # indices
epar_max_xloc = pp_max_xloc.wrap(xcc[epar_max_xloc.data]) # location
_ret = find_mp_edges(j_block, 0.1, 0.1, maskval=maskval)
sheath_edge, msphere_edge, mp_width, sheath_ind, sphere_ind = _ret
    # extract b and b**2 at sheath + 6 grid points and sphere - 6 grid points,
    # clipping cases where things go outside the block. clipped points are
# set to nan
step = 6
# extract b
if b_block.layout == "flat":
comp_axis = 0
ic, _, iy, iz = np.ix_(*[np.arange(si) for si in b_block.shape])
ix = np.clip(sheath_ind + step, 0, nx - 1)
b_sheath = b_block.data[ic, ix, iy, iz]
ix = np.clip(sheath_ind - step, 0, nx - 1)
b_sphere = b_block.data[ic, ix, iy, iz]
elif b_block.layout == "interlaced":
comp_axis = 3
_, iy, iz = np.ix_(*[np.arange(si) for si in b_block.shape[:-1]])
ix = np.clip(sheath_ind + step, 0, nx - 1)
b_sheath = b_block.data[ix, iy, iz]
ix = np.clip(sheath_ind - step, 0, nx - 1)
b_sphere = b_block.data[ix, iy, iz]
# extract b**2
bmag_sheath = np.sqrt(np.sum(b_sheath**2, axis=comp_axis))
bmag_sphere = np.sqrt(np.sum(b_sphere**2, axis=comp_axis))
costheta = (np.sum(b_sheath * b_sphere, axis=comp_axis) /
(bmag_sphere * bmag_sheath))
costheta = np.where((sheath_ind + step < nx) & (sphere_ind - step >= 0),
costheta, np.nan)
mp_shear = mp_width.wrap((180.0 / np.pi) * np.arccos(costheta))
# don't bother with pretty name since it's not written to file
# plane_crds = b_block.crds.slice_keep('x=0', cc=True)
# fld_kwargs = dict(center="Cell", time=b.time)
mp_width.name = "mp_width"
mp_xloc.name = "mp_xloc"
sheath_edge.name = "mp_sheath_edge"
msphere_edge.name = "mp_sphere_edge"
mp_shear.name = "mp_shear"
pp_max.name = "pp_max"
pp_max_xloc.name = "pp_max_xloc"
epar_max.name = "epar_max"
epar_max_xloc.name = "epar_max_xloc"
mp_info = {}
mp_info["mp_width"] = mp_width
mp_info["mp_xloc"] = mp_xloc
mp_info["mp_sheath_edge"] = sheath_edge
mp_info["mp_sphere_edge"] = msphere_edge
mp_info["mp_shear"] = mp_shear
mp_info["pp_max"] = pp_max
mp_info["pp_max_xloc"] = pp_max_xloc
mp_info["epar_max"] = epar_max
mp_info["epar_max_xloc"] = epar_max_xloc
# cache new fields to disk
if mp_fname:
viscid.save_fields(mp_fname + ".h5", mp_info.values())
try:
_paraboloid_params = fit_paraboloid(mp_info[fit], p0=fit_p0)
mp_info["paraboloid"] = _paraboloid_params
except ImportError as _exception:
try:
msg = _exception.message
except AttributeError:
msg = _exception.msg
mp_info["paraboloid"] = viscid.DeferredImportError(msg)
mp_info["mp_width"].pretty_name = "Magnetopause Width"
mp_info["mp_xloc"].pretty_name = "Magnetopause $X_{gse}$ Location"
mp_info["mp_sheath_edge"].pretty_name = "Magnetosheath Edge"
mp_info["mp_sphere_edge"].pretty_name = "Magnetosphere Edge"
mp_info["mp_shear"].pretty_name = "Magnetic Shear"
mp_info["pp_max"].pretty_name = "Max Pressure"
mp_info["pp_max_xloc"].pretty_name = "Max Pressure Location"
mp_info["epar_max"].pretty_name = "Max E Parallel"
mp_info["epar_max_xloc"].pretty_name = "Max E Parallel Location"
return mp_info
def find_mp_edges(j_block, msphere_thresh=0.1, sheath_thresh=0.1,
maskval=1e-4):
"""Find x location of msphere and msheath edges using current (J)
Note:
GSE coordinates only please
Args:
j_block (VectorField): Current density containing the whole
magnetopause
        msphere_thresh (float): threshold of current on the
magnetosphere side as a fraction of the maximum
current density, i.e., 0.1 is 10% of the max
        sheath_thresh (float): threshold of current on the
magnetosheath side as a fraction of the maximum
current density, i.e., 0.1 is 10% of the max
maskval (float, None): if not None, then mask out J values
less than maskval; useful for masking out bowshock, and
current free regions
Returns:
tuple: sheath and sphere fields / values
- **sheath_edge**: float or 2D ScalarField of x values
- **msphere_edge**: float or 2D ScalarField of x values
- **mp_width**: **sheath_edge** - **msphere_edge**
- **sheath_ind**: index of sheath_edge x location
- **sphere_ind**: index of msphere_edge x location
"""
if maskval is not None:
jy_mask = j_block['y'].data < maskval
else:
jy_mask = np.zeros_like(j_block['y'].data, dtype='bool')
xcc = j_block.get_crd_cc('x')
nx = len(xcc)
masked_jy = 1.0 * j_block['y']
masked_jy.data = np.ma.masked_where(jy_mask, j_block['y'])
jy_absmax = np.amax(np.abs(masked_jy), axis=0, keepdims=True)
msphere_mask = (masked_jy > msphere_thresh * jy_absmax)
sheath_mask = (masked_jy > sheath_thresh * jy_absmax)
jy_absmax = None
sphere_ind = np.argmax(msphere_mask, axis=0)
if isinstance(sphere_ind, viscid.field.Field):
msphere_edge = np.where(sphere_ind > 0, xcc[sphere_ind.data], np.nan)
msphere_edge = sphere_ind.wrap(msphere_edge)
else:
msphere_edge = np.where(sphere_ind > 0, xcc[sphere_ind], np.nan)
# reverse it to go from the other direction
sheath_ind = nx - 1 - np.argmax(sheath_mask['x=::-1'], axis=0)
if isinstance(sheath_ind, viscid.field.Field):
sheath_edge = np.where(sheath_ind < (nx - 1), xcc[sheath_ind.data], np.nan)
sheath_edge = sheath_ind.wrap(sheath_edge)
else:
sheath_edge = np.where(sheath_ind < (nx - 1), xcc[sheath_ind], np.nan)
    # in MHD crds, it may be sufficient to swap msp and msh at this point
mp_width = sheath_edge - msphere_edge
return sheath_edge, msphere_edge, mp_width, sheath_ind, sphere_ind
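# Sketch of the same fractional-threshold edge finding on a synthetic 1-D |Jy|
# profile (plain numpy only; the Gaussian current sheet is illustrative):
def _example_edge_threshold_1d():
    """Locate sphere/sheath edges where |Jy| first exceeds 10% of its peak."""
    x = np.linspace(6.0, 11.0, 256)
    jy = np.exp(-((x - 8.5) / 0.3)**2)
    thresh = 0.1 * jy.max()
    sphere_ind = np.argmax(jy > thresh)                     # scanning from low x
    sheath_ind = len(x) - 1 - np.argmax(jy[::-1] > thresh)  # scanning from high x
    return x[sphere_ind], x[sheath_ind]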
def _main():
f = viscid.load_file("$WORK/xi_fte_001/*.3d.[4050f].xdmf")
mp = get_mp_info(f['pp'], f['b'], f['j'], f['e_cc'], fit='mp_xloc',
slc="x=6.5f:10.5f, y=-4f:4f, z=-4.8f:3f", cache=False)
y, z = mp['pp_max_xloc'].meshgrid_flat(prune=True)
x = mp['pp_max_xloc'].data.reshape(-1)
Y, Z = mp['pp_max_xloc'].meshgrid(prune=True)
x2 = paraboloid(Y, Z, *mp['paraboloid'][0])
skip = 117
n = paraboloid_normal(Y, Z, *mp['paraboloid'][0]).reshape(3, -1)[:, ::skip]
minvar_y = Y.reshape(-1)[::skip]
minvar_z = Z.reshape(-1)[::skip]
minvar_n = np.zeros([3, len(minvar_y)])
for i in range(minvar_n.shape[0]):
p0 = [0.0, minvar_y[i], minvar_z[i]]
p0[0] = mp['pp_max_xloc']['y={0[0]}f, z={0[1]}f'.format(p0)]
minvar_n[:, i] = viscid.find_minvar_lmn_around(f['b'], p0, l=2.0, n=64)[2, :]
# 2d plots, normals don't look normal in the matplotlib projection
if False: # pylint: disable=using-constant-test
from matplotlib import pyplot as plt
from viscid.plot import vpyplot as vlt
normals = paraboloid_normal(Y, Z, *mp['paraboloid'][0])
p0 = np.array([x2, Y, Z]).reshape(3, -1)
p1 = p0 + normals.reshape(3, -1)
vlt.scatter_3d(np.vstack([x, y, z])[:, ::skip], equal=True)
for i in range(0, p0.shape[1], skip):
plt.gca().plot([p0[0, i], p1[0, i]],
[p0[1, i], p1[1, i]],
[p0[2, i], p1[2, i]], color='c')
# z2 = _ellipsiod(X, Y, *popt)
plt.gca().plot_surface(Y, Z, x2, color='r')
vlt.show()
# mayavi 3d plots, normals look better here
if True: # pylint: disable=using-constant-test
from viscid.plot import vlab
vlab.points3d(x[::skip], y[::skip], z[::skip], scale_factor=0.25,
color=(0.0, 0.0, 1.0))
mp_width = mp['mp_width']['x=0']
mp_sheath_edge = mp['mp_sheath_edge']['x=0']
mp_sphere_edge = mp_sheath_edge - mp_width
vlab.mesh(x2, Y, Z, scalars=mp_width.data)
vlab.mesh(mp_sheath_edge.data, Y, Z, opacity=0.75, color=(0.75, ) * 3)
vlab.mesh(mp_sphere_edge.data, Y, Z, opacity=0.75, color=(0.75, ) * 3)
n = paraboloid_normal(Y, Z, *mp['paraboloid'][0]).reshape(3, -1)[:, ::skip]
vlab.quiver3d(x2.reshape(-1)[::skip],
Y.reshape(-1)[::skip],
Z.reshape(-1)[::skip],
n[0], n[1], n[2], color=(1, 0, 0))
vlab.quiver3d(x2.reshape(-1)[::skip],
Y.reshape(-1)[::skip],
Z.reshape(-1)[::skip],
minvar_n[0], minvar_n[1], minvar_n[2], color=(0, 0, 1))
vlab.show()
if __name__ == "__main__":
import sys # pylint: disable=wrong-import-position,wrong-import-order
sys.exit(_main())
##
## EOF
##
```
#### File: viscid/calculator/plasma.py
```python
from __future__ import print_function, division
import numpy as np
try:
import numexpr as ne
_HAS_NUMEXPR = True
except ImportError:
_HAS_NUMEXPR = False
from viscid import field
from viscid import logger
# from viscid.calculator import calc
__all__ = ["calc_psi", "calc_beta"]
def calc_psi(B, rev=False):
"""Calc Flux function (only valid in 2d)
Parameters:
B (VectorField): magnetic field, should only have two
spatial dimensions so we can infer the symmetry dimension
rev (bool): since this integration doesn't like going
through undefined regions (like within 1 earth radius of
the origin for openggcm), you can use this to start
integrating from the opposite corner.
Returns:
ScalarField: 2-D scalar flux function
Raises:
        ValueError: If B does not have exactly 2 spatial dimensions
"""
# TODO: if this is painfully slow, i bet just putting this exact
# code in a cython module would make it a bunch faster, the problem
# being that the loops are in python instead of some broadcasting
# numpy type thing
B = B.slice_reduce(":")
# try to guess if a dim of a 3D field is invariant
reduced_axes = []
if B.nr_sdims > 2:
slcs = [slice(None)] * B.nr_sdims
for i, nxi in enumerate(B.sshape):
if nxi <= 2:
slcs[i] = 0
reduced_axes.append(B.crds.axes[i])
slcs.insert(B.nr_comp, slice(None))
B = B[slcs]
# ok, so the above didn't work... just nip out the smallest dim?
if B.nr_sdims == 3:
slcs = [slice(None)] * B.nr_sdims
i = np.argmin(B.sshape)
slcs[i] = 0
reduced_axes.append(B.crds.axes[i])
logger.warning("Tried to get the flux function of a 3D field. "
"I can't do that, so I'm\njust ignoring the {0} "
"dimension".format(reduced_axes[-1]))
slcs.insert(B.nr_comp, slice(None))
B = B[slcs]
if B.nr_sdims != 2:
raise ValueError("flux function only implemented for 2D fields")
comps = ""
for comp in "xyz":
if comp in B.crds.axes:
comps += comp
# ex: comps = "yz", comp_inds = [1, 2]
comp_inds = [dict(x=0, y=1, z=2)[comp] for comp in comps]
# Note: what follows says y, z, but it has been generalized
# to any two directions, so hy isn't necessarily hy, but it's
# easier to see at a glance if it's correct using a specific
# example
ycc, zcc = B.get_crds(comps)
comp_views = B.component_views()
hy, hz = comp_views[comp_inds[0]], comp_views[comp_inds[1]]
dy = ycc[1:] - ycc[:-1]
dz = zcc[1:] - zcc[:-1]
ny, nz = len(ycc), len(zcc)
A = np.empty((ny, nz), dtype=B.dtype)
if rev:
A[-1, -1] = 0.0
for i in range(ny - 2, -1, -1):
A[i, -1] = A[i + 1, -1] - dy[i] * 0.5 * (hz[i, -1] + hz[i + 1, -1])
for j in range(nz - 2, -1, -1):
A[:, j] = A[:, j + 1] + dz[j] * 0.5 * (hy[:, j + 1] + hy[:, j])
else:
A[0, 0] = 0.0
for i in range(1, ny):
A[i, 0] = A[i - 1, 0] + dy[i - 1] * 0.5 * (hz[i, 0] + hz[i - 1, 0])
for j in range(1, nz):
A[:, j] = A[:, j - 1] - dz[j - 1] * 0.5 * (hy[:, j - 1] + hy[:, j])
psi = field.wrap_field(A, B.crds, name="psi", center=B.center,
pretty_name=r"$\psi$", parents=[B])
if reduced_axes:
slc = "..., " + ", ".join("{0}=None".format(ax) for ax in reduced_axes)
psi = psi[slc]
return psi
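# Sketch of the trapezoid recurrence used by calc_psi on plain arrays; the
# uniform grid and constant stand-in field components are illustrative:
def _example_flux_function_2d():
    """Integrate a toy (hy, hz) pair into a 2-D flux function A."""
    ny, nz = 32, 32
    y = np.linspace(-1.0, 1.0, ny)
    z = np.linspace(-1.0, 1.0, nz)
    hy = np.ones((ny, nz))   # stand-in for the y field component
    hz = np.zeros((ny, nz))  # stand-in for the z field component
    dy, dz = y[1:] - y[:-1], z[1:] - z[:-1]
    A = np.empty((ny, nz))
    A[0, 0] = 0.0
    for i in range(1, ny):
        A[i, 0] = A[i - 1, 0] + dy[i - 1] * 0.5 * (hz[i, 0] + hz[i - 1, 0])
    for j in range(1, nz):
        A[:, j] = A[:, j - 1] - dz[j - 1] * 0.5 * (hy[:, j - 1] + hy[:, j])
    return A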
def calc_beta(pp, B, scale=1.0):
"""Calc plasma beta (2.0 * p / B^2)
Parameters:
pp (ScalarField or ndarray): pressure
B (VectorField): magnetic field
scale (float, optional): overall scale factor
Returns:
ScalarField: Plasma beta
Note:
For OpenGGCM, where pp is in pPa and B is in nT, scale should
be 40.0.
"""
two = np.array([2.0], dtype=pp.dtype)
bx, by, bz = B.component_views()
if _HAS_NUMEXPR:
ldict = dict(scale=scale, bx=bx, by=by, bz=bz,
pp=pp, two=two)
result = ne.evaluate("scale * two * pp / sqrt(bx**2 + by**2 + bz**2)",
local_dict=ldict)
else:
result = scale * two * pp / np.sqrt(bx**2 + by**2 + bz**2)
context = dict(name="beta", pretty_name=r"$\beta_{pl}$")
return pp.wrap(result, context=context)
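# Sketch mirroring the beta expression above on plain ndarrays (values are
# arbitrary; no viscid Field wrapping, scale left at 1.0):
def _example_beta_plain_arrays():
    """Compute the same beta expression for constant toy pressure and B."""
    pp = np.full((8, 8), 10.0)
    bx = np.full((8, 8), 5.0)
    by = np.zeros((8, 8))
    bz = np.full((8, 8), -5.0)
    return 2.0 * pp / np.sqrt(bx**2 + by**2 + bz**2)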
##
## EOF
##
```
#### File: viscid/calculator/separator.py
```python
from __future__ import division, print_function
from itertools import count
import numpy as np
import viscid
from viscid.compat import string_types
from viscid.cython import streamline
UNEVEN_MASK = 0b1000
UNEVEN_HALF = 0.65
__all__ = ["trace_separator", "topology_bitor_clusters", "get_sep_pts_bitor",
"get_sep_pts_bisect"]
def trace_separator(grid, b_slcstr="x=-25f:15f, y=-30f:30f, z=-15f:15f",
r=1.0, plot=False, trace_opts=None, cache=True,
cache_dir=None):
"""Trace a separator line from most dawnward null
**Still in testing** Uses the bisection algorithm.
Args:
grid (Grid): A grid that has a "b" field
b_slcstr (str): Some valid slice for B field
r (float): spatial step of separator line
plot (bool): make debugging plots
trace_opts (dict): passed to streamline function
cache (bool, str): Save to and load from cache, if "force",
then don't load from cache if it exists, but do save a
cache at the end
cache_dir (str): Directory for cache, if None, same directory
as that file to which the grid belongs
Raises:
IOError: Description
Returns:
tuple: (separator_lines, nulls)
- **separator_lines** (list): list of M 3xN ndarrays that
represent M separator lines with N points
- **nulls** (ndarray): 3xN array of N null points
"""
if not cache_dir:
cache_dir = grid.find_info("_viscid_dirname", "./")
run_name = grid.find_info("run")
sep_fname = "{0}/{1}.sep.{2:06.0f}".format(cache_dir, run_name, grid.time)
try:
if isinstance(cache, string_types) and cache.strip().lower() == "force":
raise IOError()
with np.load(sep_fname + ".npz") as dat:
sep_iter = (f for f in dat.files if f.startswith("arr_"))
_it = sorted(sep_iter, key=lambda s: int(s[len("arr_"):]))
seps = [dat[n] for n in _it]
nulls = dat['nulls']
except IOError:
_b = grid['b'][b_slcstr]
_, nulls = viscid.find_nulls(_b['x=-30f:15f'], ibound=5.0)
# get most dawnward null, nulls2 is all nulls except p0
nullind = np.argmin(nulls[1, :])
p0 = nulls[:, nullind]
nulls2 = np.concatenate([nulls[:, :nullind], nulls[:, (nullind + 1):]],
axis=1)
if plot:
from viscid.plot import vlab
vlab.plot_earth_3d(crd_system='gse')
vlab.points3d(nulls2[0], nulls2[1], nulls2[2],
color=(0, 0, 0), scale_factor=1.0)
vlab.points3d(nulls[0, nullind], nulls[1, nullind], nulls[2, nullind],
color=(1, 1, 1), scale_factor=1.0)
seed = viscid.Sphere(p0=p0, r=r, ntheta=30, nphi=60,
theta_endpoint=True, phi_endpoint=True)
p1 = viscid.get_sep_pts_bisect(_b, seed, max_depth=12, plot=plot,
trace_opts=trace_opts)
# print("p1 shape", p1.shape)
# if p1.shape[1] > 2:
# raise RuntimeError("Invalid B field, should be no branch @ null")
seps = []
sep_stubs = []
for i in range(p1.shape[1]):
sep_stubs.append([p0, p1[:, i]])
# print("??", sep_stubs)
while sep_stubs:
sep = sep_stubs.pop(0)
# print("!!! new stub")
for i in count():
# print("::", i)
seed = viscid.SphericalPatch(p0=sep[-1], p1=sep[-1] - sep[-2],
r=r, nalpha=240, nbeta=240)
pn = viscid.get_sep_pts_bisect(_b, seed, max_depth=8, plot=plot,
trace_opts=trace_opts)
if pn.shape[1] == 0:
# print("END: pn.shape[1] == 0")
break
# print("++", nulls2.shape, pn.shape)
closest_null_dist = np.min(np.linalg.norm(nulls2 - pn[:, :1], axis=0))
# print("closest_null_dist:", closest_null_dist)
if closest_null_dist < 1.01 * r:
# print("END: within 1.01 of a null")
break
# print("??", pn)
for j in range(1, pn.shape[1]):
# print("inserting new stub")
sep_stubs.insert(0, [sep[-1], pn[:, j]])
sep.append(pn[:, 0])
# print("sep", sep)
seps.append(np.stack(sep, axis=1))
if cache:
np.savez_compressed(sep_fname, *seps, nulls=nulls)
return seps, nulls
def topology_bitor_clusters(fld, min_depth=1, max_depth=10, multiple=True,
plot=False, sep_val=streamline.TOPOLOGY_MS_SEPARATOR,
mask_limit=0b1111, periodic="00", pt_bnds=()):
"""Find separator as intersection of all global topologies
Neighbors are bitwise ORed until at least one value matches
`sep_val` which is presumably (Close | Open N | Open S | SW).
This happens between min_depth and max_depth times,
where the resolution of each iteration is reduced by a factor
of two, ie, worst case 2**(max_depth).
Args:
fld (Field): Topology (bitmask) as a field
min_depth (int): Iterate at least this many times
max_depth (int): Iterate at most this many times
multiple (bool): passed to :py:func:`viscid.cluster`
sep_val (int): Value of bitmask that indicates a separator
plot (bool): Make a 2D plot of Fld and the sep candidates
mask_limit (int): if > 0, then bitmask fld with mask_limit,
i.e., fld = fld & mask_limit (bitwise and)
periodic (sequence): indicate whether that direction is
periodic, and if so, whether the coordinate arrays are
overlapped or not. Values can be True, False, or '+'. '+'
indicates that x[0] and x[-1] are not colocated, so assume
they're dx apart where dx = x[-1] - x[-2].
        pt_bnds (sequence): Boundaries that come to a point, i.e., all
values along that boundary are neighbors such as the poles
of a sphere. Specified like "0-" for lower boundary of
dimension 0 or "1+" for the upper boundary of dimension 1.
Returns:
ndarray: 2xN for N clusters of separator points in the same
coordinates as `fld`
"""
pd = [False if pi == "0" else bool(pi) for pi in periodic]
fld = fld.slice_reduce(":")
if mask_limit:
fld = np.bitwise_and(fld, mask_limit)
a = fld.data
x, y = fld.get_crds()
for i in range(max_depth):
if pd[0]:
a[(0, -1), :] |= a[(-1, 0), :]
if pd[1]:
a[:, (0, -1)] |= a[:, (-1, 0)]
a = (a[ :-1, :-1] | a[ :-1, 1: ] | # pylint: disable=bad-whitespace
a[1: , :-1] | a[1: , 1: ]) # pylint: disable=bad-whitespace
x = 0.5 * (x[1:] + x[:-1])
y = 0.5 * (y[1:] + y[:-1])
        # bitwise_or an entire boundary if all points are neighbors, like
# at the poles of a sphere
for bnd in pt_bnds:
slc = [slice(None), slice(None)]
slc[int(bnd[0])] = -1 if bnd[1] == "+" else 0
a[slc] = np.bitwise_or.reduce(a[slc])
indx, indy = np.where(a == sep_val)
if i + 1 >= min_depth and len(indx):
break
pts = viscid.cluster(indx, indy, x, y, multiple=multiple,
periodic=periodic)
if plot:
from matplotlib import pyplot as plt
from viscid.plot import vpyplot as vlt
vlt.clf()
ax0 = vlt.subplot(121)
vlt.plot(fld, title=True)
vlt.subplot(122, sharex=ax0, sharey=ax0)
or_fld = viscid.arrays2field((x, y), a, name="OR")
vlt.plot(or_fld, title=True)
_x, _y = or_fld.get_crds()
plt.plot(_x[indx], _y[indy], 'ko')
plt.plot(pts[0], pts[1], 'y^')
plt.show()
return pts
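# One 2x2 bitwise-OR coarsening pass, mirroring the loop above; the random
# bitmask stands in for a real topology field and is purely illustrative:
def _example_bitor_coarsen_once():
    """Coarsen a toy bitmask once and list cells that OR to the separator value."""
    a = np.random.randint(0, 16, size=(8, 8))
    a = (a[:-1, :-1] | a[:-1, 1:] |
         a[1:, :-1] | a[1:, 1:])
    sep_cells = np.argwhere(a == streamline.TOPOLOGY_MS_SEPARATOR)
    return a, sep_cells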
def _prep_trace_opt_defaults(trace_opts):
if trace_opts is None:
trace_opts = dict()
else:
trace_opts = dict(trace_opts)
trace_opts.setdefault('ibound', 2.5)
trace_opts.setdefault('output', viscid.OUTPUT_TOPOLOGY)
trace_opts.setdefault('max_length', 300.0)
trace_opts.setdefault('topo_style', 'msphere')
return trace_opts
def get_sep_pts_bitor(fld, seed, trace_opts=None, make_3d=True, **kwargs):
"""bitor topologies to find separator points in uv map from seed
Args:
fld (VectorField): Magnetic Field
seed (viscid.seed.SeedGen): Any Seed generator with a 2d local
representation
trace_opts (dict): kwargs for calc_streamlines
make_3d (bool): convert result from uv to 3d space
**kwargs: passed to :py:func:`topology_bitor_clusters`
Returns:
3xN ndarray of N separator points in uv space or 3d space
depending on the `make_3d` kwarg
"""
trace_opts = _prep_trace_opt_defaults(trace_opts)
topo = viscid.calc_streamlines(fld, seed, **trace_opts)[1]
try:
pt_bnds = seed.pt_bnds
except AttributeError:
pt_bnds = ()
try:
periodic = seed.periodic
except AttributeError:
periodic = "00"
kwargs.setdefault('pt_bnds', pt_bnds)
kwargs.setdefault('periodic', periodic)
pts = topology_bitor_clusters(topo, **kwargs)
if make_3d:
pts = seed.uv_to_3d(pts)
return pts
def perimeter_check_bitwise_or(arr):
"""Does perimeter of arr topologies contain a separator?
Returns:
bool
"""
return bool(np.bitwise_or.reduce(arr) == streamline.TOPOLOGY_MS_SEPARATOR)
def get_sep_pts_bisect(fld, seed, trace_opts=None, min_depth=3, max_depth=7,
plot=False, perimeter_check=perimeter_check_bitwise_or,
make_3d=True):
"""bisect uv map of seed to find separator points
Args:
fld (VectorField): Magnetic Field
seed (viscid.seed.SeedGen): Any Seed generator with a 2d local
representation
trace_opts (dict): kwargs for calc_streamlines
min_depth (int): Min allowable bisection depth
max_depth (int): Max bisection depth
plot (bool): Useful for debugging the algorithm
perimeter_check (func): Some func that returns a bool with the
same signature as :py:func:`perimeter_check_bitwise_or`
make_3d (bool): convert result from uv to 3d space
Returns:
3xN ndarray of N separator points in uv space or 3d space
depending on the `make_3d` kwarg
"""
trace_opts = _prep_trace_opt_defaults(trace_opts)
pts_even = _get_sep_pts_bisect(fld, seed, trace_opts=trace_opts,
min_depth=0, max_depth=2, plot=False,
perimeter_check=perimeter_check,
make_3d=False)
pts_uneven = _get_sep_pts_bisect(fld, seed, trace_opts=trace_opts,
min_depth=0, max_depth=2, plot=False,
perimeter_check=perimeter_check,
make_3d=False, start_uneven=True)
if pts_uneven.shape[1] > pts_even.shape[1]:
start_uneven = True
else:
start_uneven = False
# start_uneven = True
return _get_sep_pts_bisect(fld, seed, trace_opts=trace_opts,
min_depth=min_depth, max_depth=max_depth,
plot=plot, start_uneven=start_uneven,
perimeter_check=perimeter_check,
make_3d=make_3d)
def _get_sep_pts_bisect(fld, seed, trace_opts=None, min_depth=3, max_depth=7,
plot=False, perimeter_check=perimeter_check_bitwise_or,
make_3d=True, start_uneven=False, _base_quadrent="",
_uneven_mask=0, _first_recurse=True):
if len(_base_quadrent) == max_depth:
return [_base_quadrent] # causes pylint to complain
if trace_opts is None:
trace_opts = dict()
nx, ny = seed.uv_shape
(xlim, ylim) = seed.uv_extent
if _first_recurse and start_uneven:
_uneven_mask = UNEVEN_MASK
if _first_recurse and plot:
from viscid.plot import vlab
from viscid.plot import vpyplot as vlt
vlt.clf()
_, all_topo = viscid.calc_streamlines(fld, seed, **trace_opts)
vlt.plot(np.bitwise_and(all_topo, 15), show=False)
verts, arr = seed.wrap_mesh(all_topo.data)
vlab.mesh(verts[0], verts[1], verts[2], scalars=arr, opacity=0.75)
# quadrents and lines are indexed as follows...
# directions are counter clackwise around the quadrent with
# lower index (which matters for lines which are shared among
# more than one quadrent, aka, lines 1,2,6,7). Notice that even
# numbered lines are horizontal, like the interstate system :)
# -<--10-----<-8---
# | ^ ^
# 11 2 9 3 7
# \/ | |
# --<-2-----<-6----
# | ^ ^
# 3 0 1 1 5
# \/ | |
# ----0->-----4->--
# find low(left), mid(center), and high(right) crds in x and y
low_quad = "{0}{1:x}".format(_base_quadrent, 0 | _uneven_mask)
high_quad = "{0}{1:x}".format(_base_quadrent, 3 | _uneven_mask)
xl, xm, yl, ym = _quadrent_limits(low_quad, xlim, ylim)
_, xh, _, yh = _quadrent_limits(high_quad, xlim, ylim)
segsx, segsy = [None] * 12, [None] * 12
topo = [None] * 12
    nxm, nym = nx // 2, ny // 2
# make all the line segments
segsx[0], segsy[0] = np.linspace(xl, xm, nxm), np.linspace(yl, yl, nxm)
segsx[1], segsy[1] = np.linspace(xm, xm, nym), np.linspace(yl, ym, nym)
segsx[2], segsy[2] = np.linspace(xm, xl, nxm), np.linspace(ym, ym, nxm)
segsx[3], segsy[3] = np.linspace(xl, xl, nym), np.linspace(ym, yl, nym)
segsx[4], segsy[4] = np.linspace(xm, xh, nxm), np.linspace(yl, yl, nxm)
segsx[5], segsy[5] = np.linspace(xh, xh, nym), np.linspace(yl, ym, nym)
segsx[6], segsy[6] = np.linspace(xh, xm, nxm), np.linspace(ym, ym, nxm)
segsx[7], segsy[7] = np.linspace(xh, xh, nym), np.linspace(ym, yh, nym)
segsx[8], segsy[8] = np.linspace(xh, xm, nxm), np.linspace(yh, yh, nxm)
segsx[9], segsy[9] = np.linspace(xm, xm, nym), np.linspace(ym, yh, nym)
segsx[10], segsy[10] = np.linspace(xm, xl, nxm), np.linspace(yh, yh, nxm)
segsx[11], segsy[11] = np.linspace(xl, xl, nym), np.linspace(yh, ym, nym)
allx = np.concatenate(segsx)
ally = np.concatenate(segsy)
# print("plot::", _base_quadrent, '|', _uneven_mask, '|', len(allx), len(ally))
pts3d = seed.to_3d(seed.uv_to_local(np.array([allx, ally])))
_, all_topo = viscid.calc_streamlines(fld, pts3d, **trace_opts)
topo[0] = all_topo[:len(segsx[0])]
cnt = len(topo[0])
for i, segx in zip(count(1), segsx[1:]):
topo[i] = all_topo[cnt:cnt + len(segx)]
# print("??", i, cnt, cnt + len(segx), np.bitwise_and.reduce(topo[i]))
cnt += len(topo[i])
# assemble the lines into the four quadrents
quad_topo = [None] * 4
# all arrays snip off the last element since those are
# duplicated by the next line... reversed arrays do the
# snipping with -1:0:-1
quad_topo[0] = np.concatenate([topo[0][:-1], topo[1][:-1],
topo[2][:-1], topo[3][:-1]])
quad_topo[1] = np.concatenate([topo[4][:-1], topo[5][:-1],
topo[6][:-1], topo[1][-1:0:-1]])
quad_topo[2] = np.concatenate([topo[2][-1:0:-1], topo[9][:-1],
topo[10][:-1], topo[11][:-1]])
quad_topo[3] = np.concatenate([topo[6][-1:0:-1], topo[7][:-1],
topo[8][:-1], topo[9][-1:0:-1]])
# now that the quad arrays are populated, decide which quadrents
# still contain the separator (could be > 1)
required_uneven_subquads = False
ret = []
for i in range(4):
if perimeter_check(quad_topo[i]):
next_quad = "{0}{1:x}".format(_base_quadrent, i | _uneven_mask)
subquads = _get_sep_pts_bisect(fld, seed, trace_opts=trace_opts,
min_depth=min_depth,
max_depth=max_depth, plot=plot,
_base_quadrent=next_quad,
_uneven_mask=0,
_first_recurse=False)
ret += subquads
if len(ret) == 0:
perimeter = np.concatenate([topo[0][::-1], topo[4][::-1],
topo[5][::-1], topo[7][::-1],
topo[8][::-1], topo[10][::-1],
topo[11][::-1], topo[3][::-1]])
if _uneven_mask:
if len(_base_quadrent) > min_depth:
print("sep trace issue, but min depth reached: {0} > {1}"
"".format(len(_base_quadrent), min_depth))
ret = [_base_quadrent]
else:
print("sep trace issue, the separator ended prematurely")
elif perimeter_check(perimeter):
ret = _get_sep_pts_bisect(fld, seed, trace_opts=trace_opts,
min_depth=min_depth, max_depth=max_depth,
plot=plot, _base_quadrent=_base_quadrent,
_uneven_mask=UNEVEN_MASK,
_first_recurse=False)
required_uneven_subquads = True
if plot and not required_uneven_subquads:
from viscid.plot import vlab
from matplotlib import pyplot as plt
from viscid.plot import vpyplot as vlt
_pts3d = seed.to_3d(seed.uv_to_local(np.array([allx, ally])))
vlab.points3d(_pts3d[0], _pts3d[1], _pts3d[2],
all_topo.data.reshape(-1), scale_mode='none',
scale_factor=0.02)
plt.scatter(allx, ally, color=np.bitwise_and(all_topo, 15),
vmin=0, vmax=15, marker='o', edgecolor='y', s=40)
if _first_recurse:
# turn quadrent strings into locations
xc = np.empty(len(ret))
yc = np.empty(len(ret))
for i, r in enumerate(ret):
xc[i], yc[i] = _quadrent_center(r, xlim, ylim)
pts_uv = np.array([xc, yc])
if plot:
from viscid.plot import vlab
from matplotlib import pyplot as plt
from viscid.plot import vpyplot as vlt
plt.plot(pts_uv[0], pts_uv[1], "y*", ms=20,
markeredgecolor='k', markeredgewidth=1.0)
vlt.show(block=False)
vlab.show(stop=True)
# return seed.to_3d(seed.uv_to_local(pts_uv))
# if pts_uv.size == 0:
# return None
if make_3d:
return seed.uv_to_3d(pts_uv)
else:
return pts_uv
else:
return ret
def _quadrent_limits(quad_str, xlim, ylim):
xmin, xmax = xlim
ymin, ymax = ylim
xl, xh = xmin, xmax
yl, yh = ymin, ymax
for _, quad in enumerate(quad_str):
try:
quadi = int(quad, base=16)
except TypeError:
raise
midpt = UNEVEN_HALF if quadi & UNEVEN_MASK else 0.5
xm = xl + midpt * (xh - xl)
if quadi & 1:
xl = xm
else:
xh = xm
ym = yl + midpt * (yh - yl)
if quadi & 2:
yl = ym
else:
yh = ym
return xl, xh, yl, yh
def _quadrent_center(quad_str, xlim, ylim):
xl, xh, yl, yh = _quadrent_limits(quad_str, xlim, ylim)
midpt = UNEVEN_HALF if int(quad_str[-1], base=16) & UNEVEN_MASK else 0.5
xm = xl + midpt * (xh - xl)
ym = yl + midpt * (yh - yl)
return xm, ym
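# Sketch of how a bisection quadrant string maps back to uv space; the string
# and unit uv extent below are illustrative:
def _example_quadrent_string():
    """Decode quadrant string "31" (quadrant 3, then sub-quadrant 1) to limits and center."""
    xlim, ylim = (0.0, 1.0), (0.0, 1.0)
    return _quadrent_limits("31", xlim, ylim), _quadrent_center("31", xlim, ylim)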
def _make_square_segments(xl, xh, yl, yh, nx, ny):
x = np.linspace(xl, xh, nx)
y = np.linspace(yl, yh, ny)
bottom = np.vstack((x, [y[0]] * x.shape[0]))
right = np.vstack(([x[-1]] * y.shape[0], y))
top = np.vstack((x[::-1], [y[-1]] * x.shape[0]))
left = np.vstack(([x[0]] * (y.shape[0] - 1), y[::-1][:-1]))
return bottom, right, top, left
def _make_square(xl, xh, yl, yh, nx, ny):
bottom, right, top, left = _make_square_segments(xl, xh, yl, yh, nx, ny)
return np.concatenate((bottom, right, top, left), axis=1)
# def perimeter_check_pattern(arr):
# """Does perimeter of arr topologies contain a separator?
# Returns:
# bool
# """
# cyc = np.array([x[0] for x in groupby(arr)])
# cyc = np.roll(cyc, -1 * np.argmin(cyc))
# cyc_rev = np.roll(cyc[::-1], -1 * (len(cyc) - 1))
# watch_list = [(0, 1, 2, 4)]
# return bool(cyc in watch_list or cyc_rev in watch_list)
##
## EOF
##
```
#### File: viscid/compat/element_tree.py
```python
from viscid import logger
force_native_xml = True
try:
if force_native_xml:
raise ImportError
from lxml import etree
logger.debug("Using lxml library")
def parse(fname, **kwargs):
return etree.parse(fname, **kwargs)
def xinclude(tree, base_url=None, **kwargs):
"""Summary
Args:
tree (Tree): The object returned by parse
base_url (str): Not used
**kwargs: passed to tree.xinclude()
"""
# TODO: ignore if an xincluded xdmf file doesn't exist?
if base_url:
logger.warn("lxml will ignore base_url: %s", base_url)
return tree.xinclude(**kwargs)
except ImportError:
from xml.etree import ElementTree
from viscid.compat import _xdmf_include
logger.debug("Using native xml library")
def parse(fname, **kwargs):
return ElementTree.parse(fname, **kwargs)
def xinclude(tree, base_url=None, **kwargs):
"""Summary
Args:
tree (Tree): The object returned by parse
base_url (str): Interpret xinclude paths relative to this
**kwargs: passed to _xdmf_include.include
"""
root = tree.getroot()
_xdmf_include.include(root, base_url=base_url, **kwargs)
```
#### File: Viscid/viscid/dipole.py
```python
from __future__ import print_function, division
import sys
import numpy as np
import viscid
from viscid import field
from viscid import seed
from viscid.calculator import interp_trilin
# from viscid import vutil
try:
import numexpr as ne # pylint: disable=wrong-import-order
_HAS_NUMEXPR = True
except ImportError:
_HAS_NUMEXPR = False
__all__ = ['guess_dipole_moment', 'make_dipole', 'fill_dipole', 'calc_dip',
'set_in_region', 'make_spherical_mask', 'xyz2lsrlp', 'dipole_map',
'dipole_map_value']
# note that this global is used immutably (ie, not rc file configurable)
DEFAULT_STRENGTH = 1.0 / 3.0574e-5
def guess_dipole_moment(b, r=2.0, strength=DEFAULT_STRENGTH, cap_angle=40,
cap_ntheta=121, cap_nphi=121, plot=False):
"""guess dipole moment from a B field"""
viscid.warn("guess_dipole_moment doesn't seem to do better than 1.6 "
"degrees, you may want to use cotr instead.")
cap = seed.SphericalCap(r=r, angle=cap_angle, ntheta=cap_ntheta,
nphi=cap_nphi)
b_cap = interp_trilin(b, cap)
# FIXME: this doesn't get closer than 1.6 deg @ (theta, mu) = (0, 7.5)
# so maybe the index is incorrect somehow?
idx = np.argmax(viscid.magnitude(b_cap).data)
pole = cap.points()[:, idx]
    # FIXME: it should be achievable to get strength from the maximum magnitude,
# up to the direction
pole = strength * pole / np.linalg.norm(pole)
# # not sure where 0.133 comes from, this is probably not correct
# pole *= 0.133 * np.dot(pole, b_cap.data.reshape(-1, 3)[idx, :]) * r**3
if plot:
from matplotlib import pyplot as plt
from viscid.plot import vpyplot as vlt
vlt.plot(viscid.magnitude(b_cap))
vlt.plot(viscid.magnitude(b_cap), style='contour', levels=10,
colors='k', colorbar=False, ax=plt.gca())
vlt.show()
return pole
def make_dipole(m=(0, 0, -DEFAULT_STRENGTH), strength=None, l=None, h=None,
n=None, twod=False, dtype='f8', nonuniform=False,
crd_system='gse', name='b'):
"""Generate a dipole field with magnetic moment m [x, y, z]"""
if l is None:
l = [-5] * 3
if h is None:
h = [5] * 3
if n is None:
n = [256] * 3
x = np.array(np.linspace(l[0], h[0], n[0]), dtype=dtype)
y = np.array(np.linspace(l[1], h[1], n[1]), dtype=dtype)
z = np.array(np.linspace(l[2], h[2], n[2]), dtype=dtype)
if twod:
y = np.array(np.linspace(-0.1, 0.1, 2), dtype=dtype)
if nonuniform:
z += 0.01 * ((h[2] - l[2]) / n[2]) * np.sin(np.linspace(0, np.pi, n[2]))
B = field.empty([x, y, z], nr_comps=3, name=name, center='cell',
layout='interlaced', dtype=dtype)
B.set_info('crd_system', viscid.as_crd_system(crd_system))
B.set_info('cotr', viscid.dipole_moment2cotr(m, crd_system=crd_system))
return fill_dipole(B, m=m, strength=strength)
def fill_dipole(B, m=(0, 0, -DEFAULT_STRENGTH), strength=None, mask=None):
"""set B to a dipole with magnetic moment m
Args:
B (Field): Field to fill with a dipole
m (ndarray, or datetime64-like): Description
strength (float): if given, rescale the dipole moment
even if it was given explicitly
mask (Field): boolean field as mask, B will be filled where
the mask is True
Returns:
Field: B
"""
# FIXME: should really be taking the curl of a vector field
if mask:
Bdip = field.empty_like(B)
else:
Bdip = B
# Xcc, Ycc, Zcc = B.get_crds_cc(shaped=True) # pylint: disable=W0612
Xv, Yv, Zv = B.get_crds_vector(shaped=True) # pylint: disable=W0612
_crd_lst = [[_x, _y, _z] for _x, _y, _z in zip(Xv, Yv, Zv)]
dtype = B.dtype
one = np.array([1.0], dtype=dtype) # pylint: disable=W0612
three = np.array([3.0], dtype=dtype) # pylint: disable=W0612
if viscid.is_datetime_like(m):
m = viscid.get_dipole_moment(m, crd_system=B)
else:
m = np.asarray(m, dtype=dtype)
if strength is not None:
m = (strength / np.linalg.norm(m)) * m
mx, my, mz = m # pylint: disable=W0612
    # generate a dipole field for the entire grid
# Note: this is almost the exact same as calc_dip, but since components
# are done one-at-a-time, it requires less memory since it copies the
# result of each component into Bdip separately
if _HAS_NUMEXPR:
for i, cn in enumerate("xyz"):
_X, _Y, _Z = _crd_lst[i]
_XI = _crd_lst[i][i]
_mi = m[i]
rsq = ne.evaluate("_X**2 + _Y**2 + _Z**2") # pylint: disable=W0612
mdotr = ne.evaluate("mx * _X + my * _Y + mz * _Z") # pylint: disable=W0612
Bdip[cn] = ne.evaluate("((three * _XI * mdotr / rsq) - _mi) / rsq**1.5")
else:
for i, cn in enumerate("xyz"):
_X, _Y, _Z = _crd_lst[i]
_XI = _crd_lst[i][i]
_mi = m[i]
rsq = _X**2 + _Y**2 + _Z**2
mdotr = mx * _X + my * _Y + mz * _Z
Bdip[cn] = ((three * _XI * mdotr / rsq) - _mi) / rsq**1.5
if mask:
B.data[...] = np.choose(mask.astype('i'), [B, Bdip])
return B
def calc_dip(pts, m=(0, 0, -DEFAULT_STRENGTH), strength=None, crd_system='gse',
dtype=None):
"""Calculate a dipole field at various points
Args:
pts (ndarray): Nx3 array of points at which to calculate the
dipole. Should use the same crd system as `m`
m (sequence, datetime): dipole moment
strength (None, float): If given, rescale m to this magnitude
crd_system (str): Something from which cotr can divine the
coordinate system for both `pts` and `m`. This is only used
if m is given as a datetime and we need to figure out the
dipole moment at a given time in a given crd system
dtype (str, np.dtype): dtype of the result, defaults to
the same datatype as `pts`
Returns:
ndarray: Nx3 dipole field vectors for N points
"""
pts = np.asarray(pts, dtype=dtype)
if len(pts.shape) == 1:
pts = pts.reshape(1, 3)
single_pt = True
else:
single_pt = False
if dtype is None:
dtype = pts.dtype
one = np.array([1.0], dtype=dtype) # pylint: disable=W0612
three = np.array([3.0], dtype=dtype) # pylint: disable=W0612
if viscid.is_datetime_like(m):
m = viscid.get_dipole_moment(m, crd_system=crd_system)
else:
m = np.asarray(m, dtype=dtype)
if strength is not None:
m = (strength / np.linalg.norm(m)) * m
mx, my, mz = m # pylint: disable=W0612
m = m.reshape(1, 3)
    # generate a dipole field for the entire grid
# Note: this is almost the same as fill_dipole, but all components
# are calculated simultaneously, and so this uses more memory
if _HAS_NUMEXPR:
_X, _Y, _Z = pts.T
rsq = ne.evaluate("_X**2 + _Y**2 + _Z**2") # pylint: disable=W0612
mdotr = ne.evaluate("mx * _X + my * _Y + mz * _Z") # pylint: disable=W0612
Bdip = ne.evaluate("((three * pts * mdotr / rsq) - m) / rsq**1.5")
else:
_X, _Y, _Z = pts.T
rsq = _X**2 + _Y**2 + _Z**2
mdotr = mx * _X + my * _Y + mz * _Z
Bdip = ((three * pts * mdotr / rsq) - m) / rsq**1.5
if single_pt:
Bdip = Bdip[0, :]
return Bdip
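# Minimal call of calc_dip on a single point with the default moment; the
# point is arbitrary and in the same crd system as the moment:
def _example_calc_dip():
    """Return the dipole field vector at one point."""
    pt = np.array([3.0, 0.0, 0.0])
    return calc_dip(pt)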
def set_in_region(a, b, alpha=1.0, beta=1.0, mask=None, out=None):
"""set `ret = alpha * a + beta * b` where mask is True"""
alpha = np.asarray(alpha, dtype=a.dtype)
beta = np.asarray(beta, dtype=a.dtype)
a_dat = a.data if isinstance(a, viscid.field.Field) else a
b_dat = b.data if isinstance(b, viscid.field.Field) else b
b = None
if _HAS_NUMEXPR:
vals = ne.evaluate("alpha * a_dat + beta * b_dat")
else:
vals = alpha * a_dat + beta * b_dat
a_dat = b_dat = None
if out is None:
out = field.empty_like(a)
if mask is None:
out.data[...] = vals
else:
if hasattr(mask, "nr_comps") and mask.nr_comps:
mask = mask.as_centered(a.center).as_layout(a.layout)
try:
out.data[...] = np.choose(mask, [out.data, vals])
except ValueError:
out.data[...] = np.choose(mask.data.reshape(list(mask.sshape) + [1]),
[out.data, vals])
return out
def make_spherical_mask(fld, rmin=0.0, rmax=None, rsq=None):
"""make a mask that is True between rmin and rmax"""
if rmax is None:
rmax = np.sqrt(0.9 * np.finfo('f8').max)
if True and fld.nr_comps and fld.center.lower() in ('edge', 'face'):
mask = np.empty(fld.shape, dtype='bool')
Xv, Yv, Zv = fld.get_crds_vector(shaped=True) # pylint: disable=W0612
_crd_lst = [[_x, _y, _z] for _x, _y, _z in zip(Xv, Yv, Zv)]
# csq = [c**2 for c in fld.get_crds_vector(shaped=True)]
for i in range(3):
rsq = np.sum([c**2 for c in _crd_lst[i]], axis=0)
_slc = [slice(None)] * len(fld.shape)
_slc[fld.nr_comp] = i
mask[_slc] = np.bitwise_and(rsq >= rmin**2, rsq < rmax**2)
return fld.wrap_field(mask, dtype='bool')
else:
rsq = np.sum([c**2 for c in fld.get_crds(shaped=True)], axis=0)
mask = np.bitwise_and(rsq >= rmin**2, rsq < rmax**2)
if fld.nr_comps:
fld = fld['x']
return fld.wrap_field(mask, dtype='bool')
def _precondition_pts(pts):
"""Make sure pts are a 2d ndarray with length 3 in 1st dim"""
pts = np.asarray(pts)
if len(pts.shape) == 1:
pts = pts.reshape((3, 1))
return pts
def xyz2lsrlp(pts, cotr=None, crd_system='gse'):
"""Ceovert x, y, z -> l-shell, r, lambda, phi [sm coords]
- r, theta, phi = viscid.cart2sph(pts in x, y, z)
- lambda = 90deg - theta
- r = L cos^2(lambda)
Args:
pts (ndarray): 3xN for N (x, y, z) points
cotr (None): if given, use cotr to perform mapping to / from sm
crd_system (str): crd system of pts
Returns:
ndarray: 4xN array of N (l-shell, r, lamda, phi) points
"""
pts = _precondition_pts(pts)
crd_system = viscid.as_crd_system(crd_system)
cotr = viscid.as_cotr(cotr)
# pts -> sm coords
pts_sm = cotr.transform(crd_system, 'sm', pts)
del pts
# sm xyz -> r theta phi
pts_rlp = viscid.cart2sph(pts_sm)
# theta -> lamda (latitude)
pts_rlp[1, :] = 0.5 * np.pi - pts_rlp[1, :]
# get the L-shell from lamda and r
lshell = pts_rlp[0:1, :] / np.cos(pts_rlp[1:2, :])**2
return np.concatenate([lshell, pts_rlp], axis=0)
def dipole_map(pts, r=1.0, cotr=None, crd_system='gse', as_spherical=False):
"""Map pts along an ideal dipole to radius r
lambda = 90deg - theta; r = L cos^2(lambda)
cos^2(lambda) = cos^2(lambda_0) * (r / r_0)
Args:
pts (ndarray): 3xN for N (x, y, z) points
r (float): radius to map to
cotr (None): if given, use cotr to perform mapping to / from sm
crd_system (str): crd system of pts
as_spherical(bool): if True, then the return array is
(t, theta, phi) with theta in the range [0, 180] and phi
[0, 360] (in degrees)
Returns:
ndarray: 3xN array of N (x, y, z) points all at a distance
r_mapped from the center of the dipole
"""
pts = _precondition_pts(pts)
crd_system = viscid.as_crd_system(crd_system)
cotr = viscid.as_cotr(cotr)
lsrlp = xyz2lsrlp(pts, cotr=cotr, crd_system=crd_system)
del pts
# this masking causes trouble
# lsrlp = np.ma.masked_where(r lsrlp[0:1, :], lsrlp)
# lsrlp = np.ma.masked_where(np.array([[r] * 3]).T > lsrlp[0:1, :], lsrlp)
# rlp: r, lamda (latitude), phi
rlp_mapped = np.empty_like(lsrlp[1:, :])
rlp_mapped[0, :] = r
# root is determined by sign of latitude in sm?
root = np.sign(lsrlp[2:3, :])
rlp_mapped[1, :] = root * np.arccos(np.sqrt(r / lsrlp[0:1, :]))
rlp_mapped[2, :] = lsrlp[3:4, :]
del lsrlp
rlp_mapped[1, :] = 0.5 * np.pi - rlp_mapped[1, :] # lamda (latitude) -> theta
if as_spherical:
ret = rlp_mapped # is now r, theta, phi
ret[1:, :] = np.rad2deg(ret[1:, :])
# rotate angle phi by 360 until ret[2, :] is all between 0 and 360
ret[2, :] -= 360.0 * (ret[2, :] // 360.0)
else:
ret = cotr.transform('sm', crd_system, viscid.sph2cart(rlp_mapped))
return ret
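# Sketch of the ideal-dipole relation r = L * cos(lambda)**2 that dipole_map
# inverts; the L-shell and target radius below are illustrative:
def _example_lshell_relation():
    """Latitude (deg) at which an L = 6 field line crosses r = 1."""
    lshell, r_mapped = 6.0, 1.0
    lam = np.arccos(np.sqrt(r_mapped / lshell))
    return np.rad2deg(lam)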
def dipole_map_value(fld, pts, r=1.0, fillna=None, cotr=None,
crd_system=None, interp_kind='linear'):
"""Map values assuming they're constant along ideal dipole lines
Args:
fld (Field): values to interpolate onto the mapped pts
pts (ndarray): 3xN for N (x, y, z) points that will be mapped
r (float): radius of resulting map
cotr (None): if given, use cotr to perform mapping in sm
crd_system (str): crd system of pts
interp_kind (str): how to interpolate fld onto source points
Returns:
ndarray: ndarray of mapped values, one for each of the N points
"""
if crd_system is None:
crd_system = viscid.as_crd_system(fld, 'gse')
if fld.is_spherical:
# TODO: verify that crd_system works as expected for ionosphere
# fields (ie, meaning of +x and phi = 0)
fld = viscid.as_spherefield(fld, order=('theta', 'phi'))['r=newaxis, ...']
else:
pass
# pts should be shaped 3xNX*NY*NZ or similar such that the points
# are in the same order as the flattened c-contiguous array
mapped_pts = dipole_map(pts, r=r, cotr=cotr, crd_system=crd_system,
as_spherical=fld.is_spherical)
ret = viscid.interp(fld, mapped_pts, kind=interp_kind, wrap=False)
if fillna is not None:
ret[np.isnan(ret)] = fillna
return ret
def _main():
crd_system = 'gse'
print(viscid.get_dipole_moment_ang(dip_tilt=45.0, dip_gsm=0.0,
crd_system=crd_system))
print(viscid.get_dipole_moment_ang(dip_tilt=0.0, dip_gsm=45.0,
crd_system=crd_system))
print(viscid.get_dipole_moment_ang(dip_tilt=45.0, dip_gsm=45.0,
crd_system=crd_system))
print("---")
ptsNP = np.array([[+2, -2, +2], [+2, -1, +2], [+2, 1, +2], [+2, 2, +2]]).T
ptsSP = np.array([[+2, -2, -2], [+2, -1, -2], [+2, 1, -2], [+2, 2, -2]]).T
ptsNN = np.array([[-2, -2, +2], [-2, -1, +2], [-2, 1, +2], [-2, 2, +2]]).T
ptsSN = np.array([[-2, -2, -2], [-2, -1, -2], [-2, 1, -2], [-2, 2, -2]]).T
mapped_ptsNP = dipole_map(ptsNP)
mapped_ptsNN = dipole_map(ptsNN)
mapped_ptsSP = dipole_map(ptsSP)
mapped_ptsSN = dipole_map(ptsSN)
try:
from viscid.plot import vlab
colors1 = np.array([(0.6, 0.2, 0.2),
(0.2, 0.2, 0.6),
(0.6, 0.6, 0.2),
(0.2, 0.6, 0.6)])
colors2 = colors1 * 0.5
vlab.points3d(ptsNP, scale_factor=0.4, color=tuple(colors1[0]))
vlab.points3d(ptsNN, scale_factor=0.4, color=tuple(colors1[1]))
vlab.points3d(ptsSP, scale_factor=0.4, color=tuple(colors1[2]))
vlab.points3d(ptsSN, scale_factor=0.4, color=tuple(colors1[3]))
vlab.points3d(mapped_ptsNP, scale_factor=0.4, color=tuple(colors2[0]))
vlab.points3d(mapped_ptsNN, scale_factor=0.4, color=tuple(colors2[1]))
vlab.points3d(mapped_ptsSP, scale_factor=0.4, color=tuple(colors2[2]))
vlab.points3d(mapped_ptsSN, scale_factor=0.4, color=tuple(colors2[3]))
b = make_dipole()
vlab.plot_lines(viscid.calc_streamlines(b, mapped_ptsNP, ibound=0.5)[0])
vlab.plot_lines(viscid.calc_streamlines(b, mapped_ptsNN, ibound=0.5)[0])
vlab.show()
except ImportError:
print("Mayavi not installed, no 3D plots", file=sys.stderr)
if __name__ == "__main__":
_main()
```
#### File: Viscid/viscid/extools.py
```python
from __future__ import print_function
import os
import subprocess as sub
__all__ = ['make_animation', 'meshlab_convert']
def make_animation(movie_fname, prefix, framerate=5, qscale=2, keep=False,
args=None, frame_idx_fmt="_%06d", program="ffmpeg",
yes=False):
""" make animation by calling program (only ffmpeg works for now) using
args, which is a namespace filled by the argparse options from
add_animate_arguments. Plots are expected to be named
${args.prefix}_000001.png where the number is in order from 1 up """
if args is not None:
prefix = args.prefix
framerate = args.framerate
qscale = args.qscale
movie_fname = args.animate
keep = args.keep
if movie_fname:
cmd = "yes | {0}".format(program) if yes else program
if program == "ffmpeg":
sub.Popen("{0} -r {1} -i {3}{4}.png -pix_fmt yuv420p "
"-qscale {2} {5}".format(cmd, framerate, qscale, prefix,
frame_idx_fmt, movie_fname),
shell=True).communicate()
if movie_fname is None and prefix is not None:
keep = True
if not keep:
sub.Popen("rm -f {0}_*.png".format(prefix), shell=True).communicate()
return None
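# Minimal call sketch; assumes ffmpeg is on the PATH and frames named
# plot_000001.png, plot_000002.png, ... already exist (names are illustrative):
def _example_make_animation():
    """Stitch pre-rendered frames into out.mp4, keeping the pngs."""
    make_animation("out.mp4", "plot", framerate=10, qscale=2, keep=True)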
def meshlab_convert(fname, fmt="dae", quiet=True):
"""Run meshlabserver to convert 3D mesh files
Uses `MeshLab <http://meshlab.sourceforge.net/>`_, which is a great
little program for playing with 3D meshes. The best part is that
OS X's Preview can open the COLLADA (`*.dae`) format. How cool is
that?
Args:
fname (str): file to convert
fmt (str): extension of result, defaults to COLLADA format
quiet (bool): redirect output to :py:attr:`os.devnull`
Returns:
None
"""
iname = fname
oname = '.'.join(iname.split('.')[:-1]) + "." + fmt.strip()
redirect = "&> {0}".format(os.devnull) if quiet else ""
cmd = ("meshlabserver -i {0} -o {1} -m vc vn fc fn {2}"
"".format(iname, oname, redirect))
sub.Popen(cmd, shell=True, stdout=None, stderr=None)
##
## EOF
##
```
#### File: Viscid/viscid/multiplot.py
```python
from __future__ import print_function
import itertools
from viscid.compat import izip
from viscid import logger
from viscid import parallel
__all__ = ['multiplot']
def multiplot(vfile, plot_func=None, nr_procs=1, time_slice=":", **kwargs):
"""Make lots of plots
Calls plot_func (or `_do_multiplot` if plot_func is None) with 2
positional arguments (int, Grid), and all the kwargs given to
multiplot.
Grid is determined by vfile.iter_times(time_slice).
plot_func gets additional keyword arguments first_run (bool) and
first_run_result (whatever is returned from plot_func by the first
call).
This is the function used by the ``p2d`` script. It may be useful
to you.
Args:
vfile (VFile, Grid): Something that has iter_times
plot_func (callable): Function that makes a single plot. It
must take an int (index of time slice), a Grid, and any
number of keyword argumets. If None, _do_multiplot is used
nr_procs (int): number of parallel processes to farm out
plot_func to
time_slice (str): passed to vfile.iter_times()
        **kwargs: passed as keyword arguments to plot_func
"""
# make sure time slice yields >= 1 actual time slice
try:
next(vfile.iter_times(time_slice))
except StopIteration:
raise ValueError("Time slice '{0}' yields no data".format(time_slice))
if plot_func is None:
plot_func = _do_multiplot
grid_iter = izip(itertools.count(), vfile.iter_times(time_slice))
args_kw = kwargs.copy()
args_kw["first_run"] = True
args_kw["first_run_result"] = None
if "subplot_params" not in args_kw.get("kwopts", {}):
r = parallel.map(1, plot_func, [next(grid_iter)], args_kw=args_kw,
force_subprocess=(nr_procs > 1))
# now get back to your regularly scheduled programming
args_kw["first_run"] = False
args_kw["first_run_result"] = r[0]
parallel.map(nr_procs, plot_func, grid_iter, args_kw=args_kw)
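# Skeleton of a custom plot_func with the calling convention described above;
# the field name "pp" and output file pattern are illustrative:
def _example_plot_func(tind, grid, first_run=False, first_run_result=None, **kwargs):
    """Plot one field for one time slice and save a numbered frame."""
    from matplotlib import pyplot as plt
    from viscid.plot import vpyplot as vlt
    with grid.get_field("pp") as fld:
        vlt.plot(fld, masknan=True)
    plt.savefig("frame_{0:06d}.png".format(tind + 1))
    plt.clf()
    return first_run_result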
def _do_multiplot(tind, grid, plot_vars=None, global_popts=None, kwopts=None,
share_axes=False, show=False, subplot_params=None,
first_run_result=None, first_run=False, **kwargs):
import matplotlib.pyplot as plt
from viscid.plot import vpyplot as vlt
logger.info("Plotting timestep: %d, %g", tind, grid.time)
if plot_vars is None:
raise ValueError("No plot_vars given to `_do_multiplot` :(")
if kwargs:
logger.info("Unused kwargs: {0}".format(kwargs))
if kwopts is None:
kwopts = {}
transpose = kwopts.get("transpose", False)
plot_size = kwopts.get("plot_size", None)
dpi = kwopts.get("dpi", None)
out_prefix = kwopts.get("out_prefix", None)
out_format = kwopts.get("out_format", "png")
selection = kwopts.get("selection", None)
timeformat = kwopts.get("timeformat", ".02f")
tighten = kwopts.get("tighten", False)
# wicked hacky
# subplot_params = kwopts.get("subplot_params", _subplot_params)
# nrows = len(plot_vars)
nrows = len([pv[0] for pv in plot_vars if not pv[0].startswith('^')])
ncols = 1
if transpose:
nrows, ncols = ncols, nrows
if nrows == 0:
logger.warn("I have no variables to plot")
return
fig = plt.gcf()
if plot_size is not None:
fig.set_size_inches(*plot_size, forward=True)
if dpi is not None:
fig.set_dpi(dpi)
shareax = None
this_row = -1
for i, fld_meta in enumerate(plot_vars):
if not fld_meta[0].startswith('^'):
this_row += 1
same_axis = False
else:
same_axis = True
fld_name_meta = fld_meta[0].lstrip('^')
fld_name_split = fld_name_meta.split(',')
if '=' in fld_name_split[0]:
# if fld_name is actually an equation, assume
# there's no slice, and commas are part of the
# equation
fld_name = ",".join(fld_name_split)
fld_slc = ""
else:
fld_name = fld_name_split[0]
fld_slc = ",".join(fld_name_split[1:])
if selection is not None:
# fld_slc += ",{0}".format(selection)
if fld_slc != "":
fld_slc = ",".join([fld_slc, selection])
else:
fld_slc = selection
if fld_slc.strip() == "":
fld_slc = None
# print("fld_time:", fld.time)
if this_row < 0:
raise ValueError("first plot can't begin with a +")
row = this_row
col = 0
if transpose:
row, col = col, row
if not same_axis:
ax = plt.subplot2grid((nrows, ncols), (row, col),
sharex=shareax, sharey=shareax)
if i == 0 and share_axes:
shareax = ax
if "plot_opts" not in fld_meta[1]:
fld_meta[1]["plot_opts"] = global_popts
elif global_popts is not None:
fld_meta[1]["plot_opts"] = "{0},{1}".format(
fld_meta[1]["plot_opts"], global_popts)
with grid.get_field(fld_name, slc=fld_slc) as fld:
vlt.plot(fld, masknan=True, **fld_meta[1])
# print("fld cache", grid[fld_meta[0]]._cache)
if timeformat and timeformat.lower() != "none":
plt.suptitle(grid.format_time(timeformat))
# for adjusting subplots / tight_layout and applying the various
# hacks to keep plots from dancing around in movies
if not subplot_params and first_run_result:
subplot_params = first_run_result
if tighten:
tighten = dict(rect=[0, 0.03, 1, 0.90])
ret = vlt.auto_adjust_subplots(tight_layout=tighten,
subplot_params=subplot_params)
if not first_run:
ret = None
if out_prefix:
plt.savefig("{0}_{1:06d}.{2}".format(out_prefix, tind + 1, out_format))
if show:
plt.show()
plt.clf()
return ret
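# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The driver above
# calls `_do_multiplot` once per timestep; each entry of `plot_vars` is assumed
# to be a (name, kwargs) pair, where the name may carry a comma-separated slice
# and a leading '^' overlays the variable on the previous subplot. The variable
# names and slice strings below are illustrative assumptions only:
#
#     plot_vars = [
#         ("pp,y=0.0", {"plot_opts": "log"}),    # first row, sliced at y=0
#         ("^vx,y=0.0", {}),                     # overlaid on the same axes
#         ("by,y=0.0", {"plot_opts": "lin_0"}),  # second row
#     ]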
##
## EOF
##
```
#### File: Viscid/viscid/npdatetime.py
```python
from __future__ import print_function, division
from datetime import datetime, timedelta
from distutils.version import LooseVersion
try:
from itertools import izip
except ImportError:
izip = zip
import re
import sys
import numpy as np
if sys.version_info[0] == 3:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
__all__ = ['PrecisionError',
'as_datetime64', 'as_timedelta64', 'as_datetime', 'as_timedelta',
'to_datetime64', 'to_timedelta64', 'to_datetime', 'to_timedelta',
'as_isotime', 'to_isotime', 'format_time', 'format_datetime',
'is_valid_datetime64', 'is_valid_timedelta64',
'datetime_as_seconds', 'timedelta_as_seconds', 'time_as_seconds',
'datetime64_as_years',
'asarray_datetime64', 'linspace_datetime64',
'round_time', 'regularize_time',
'time_sum', 'time_diff',
'is_datetime_like', 'is_timedelta_like', 'is_time_like']
_NP_TZ = LooseVersion(np.__version__) < LooseVersion('1.11')
TIME_UNITS = ('as', 'fs', 'ps', 'ns', 'us', 'ms', 's', 'm', 'h')
TIME_SCALE = (1e3, 1e3, 1e3, 1e3, 1e3, 1e3, 60, 60)
DATE_UNITS = ('D', 'W', 'M', 'Y')
ORDERED_UNITS = list(TIME_UNITS) + list(DATE_UNITS)
DATETIME_BASE = "datetime64"
DELTA_BASE = "timedelta64"
class PrecisionError(ArithmeticError):
"""Used if conversion to a time unit truncates value to 0"""
pass
# This class is for Python2.6 compatability since in 2.6, timedelta has
# no total_seconds method
class TimeDeltaCompat(timedelta):
@staticmethod
def __new__(*args, **kwargs): # pylint: disable=no-method-argument
if len(args) == 2 and isinstance(args[1], (timedelta, np.timedelta64)):
return timedelta.__new__(args[0], args[1].days, args[1].seconds,
args[1].microseconds)
else:
return timedelta.__new__(*args, **kwargs)
def total_seconds(self):
try:
return super(TimeDeltaCompat, self).total_seconds()
except AttributeError:
return (self.microseconds + (self.seconds + self.days * 24 * 3600)
* 10**6) / 10**6
def _format_unit(unit, base=DATETIME_BASE):
if unit:
return "{0}[{1}]".format(base, unit)
else:
return base
def _as_dtype(t):
if isinstance(t, np.dtype):
return t
else:
return t.dtype
def _get_base_unit(t):
name = _as_dtype(t).name
base = name[:name.rfind('[')]
unit = name[name.rfind('[') + 1:name.rfind(']')]
return base, unit
def _get_unit(t):
return _get_base_unit(t)[1]
def _is_datetime64(t):
return _as_dtype(t).type == np.datetime64
def _is_dateunit(t):
return _is_datetime64(t) and _get_unit(t) in DATE_UNITS
def _is_timeunit(t):
return _is_datetime64(t) and _get_unit(t) in TIME_UNITS
def _as_datetime64_scalar(time, unit=None):
unit_args = [unit] if unit else []
if is_timedelta_like(time):
scalar = as_timedelta64(time, unit=unit).astype(_format_unit(None))
elif isinstance(time, string_types):
try:
time = as_isotime(time)
except (TypeError, ValueError):
pass # Let ValueErrors happen in numpy constructors below
if _is_timeunit(np.datetime64(time).dtype) and _NP_TZ:
has_tz = bool(re.match(r".*([+-][0-9]{2,4}|Z)$", time))
if not has_tz:
time += 'Z'
scalar = np.datetime64(time, *unit_args)
elif unit_args and hasattr(time, 'astype'):
scalar = time.astype(_format_unit(unit_args[0]))
else:
scalar = np.datetime64(time, *unit_args)
return scalar
def _as_timedelta64_scalar(time, unit=None):
unit_args = [unit] if unit else []
flt_unit = unit if unit else 's'
# turn 'H:M:S.ms', 'M:S.ms', 'S.ms' into floating point seconds
if isinstance(time, string_types):# and ':' in time:
time = [float(t) for t in time.split(':')][::-1]
if len(time) > 1 and unit is not None:
raise ValueError("When giving time as a string, units are automatic")
if len(time) > 3:
raise ValueError("Timedelta as string only goes up to hours")
t_flt = 0.0
for factor, t in zip([1, 60, 60 * 60], time):
t_flt += factor * t
time = t_flt
flt_unit = 's'
# turn floating point time into integer with the correct unit
if is_datetime_like(time):
time = as_datetime64(time) - as_datetime64(np.timedelta64(0, 's'))
elif isinstance(time, (np.timedelta64, timedelta)):
time = np.timedelta64(time).astype(_format_unit(unit, base=DELTA_BASE))
elif isinstance(time, (int, float, np.integer, np.floating)):
orig_time, orig_flt_unit = time, flt_unit
unit_idx = TIME_UNITS.index(flt_unit)
while not np.isclose(time, int(np.round(time)), rtol=1e-4, atol=1e-18):
if unit_idx <= 0:
raise ValueError("Floating point time {0} [{1}] is too precise "
"for any time unit?".format(orig_time, orig_flt_unit))
unit_idx -= 1
time *= TIME_SCALE[unit_idx]
flt_unit = TIME_UNITS[unit_idx]
time = np.timedelta64(int(np.round(time)), flt_unit)
unit, unit_args = flt_unit, [flt_unit]
return np.timedelta64(time, *unit_args)
def as_isotime(time):
"""Try to convert times in string format to ISO 8601
Raises:
TypeError: Elements are not strings
ValueError: numpy.datetime64(time) fails
"""
if isinstance(time, (list, tuple, np.ndarray)):
scalar = False
else:
scalar = True
time = [time]
ret = [None] * len(time)
for i, t in enumerate(time):
if isinstance(t, string_types):
t = t.strip().upper().lstrip('UT')
if re.match(r"^[0-9]{2}([0-9]{2}:){3,5}[0-9]{1,2}(\.[0-9]*)?$", t):
# Handle YYYY:MM:DD:hh:mm:ss.ms -> YYYY-MM-DDThh:mm:ss.ms
# YYYY:MM:DD:hh:mm:s.ms -> YYYY-MM-DDThh:mm:s.ms
# YYYY:MM:DD:hh:mm:ss -> YYYY-MM-DDThh:mm:ss
# YYYY:MM:DD:hh:mm -> YYYY-MM-DDThh:mm
# YYYY:MM:DD:hh -> YYYY-MM-DDThh
# -- all this _tsp nonsense is to take care of s.ms; annoying
_tsp = t.replace('.', ':').split(':')
_tsp[0] = _tsp[0].zfill(4)
_tsp[1:6] = [_s.zfill(2) for _s in _tsp[1:6]]
t = ":".join(_tsp[:6])
if len(_tsp) > 6:
t += "." + _tsp[6]
# --
ret[i] = t[:10].replace(':', '-') + 'T' + t[11:]
elif re.match(r"^[0-9]{2}([0-9]{2}:){2}[0-9]{2}$", t):
# Handle YYYY:MM:DD -> YYYY-MM-DD
ret[i] = t.replace(':', '-')
else:
ret[i] = t
try:
np.datetime64(ret[i])
except ValueError:
raise
else:
raise TypeError("Can only turn strings to ISO 8601 time format "
"({0})".format(type(t)))
if scalar:
return ret[0]
else:
if isinstance(time, np.ndarray):
            return np.array(ret, dtype=time.dtype)
else:
return ret
def as_datetime64(time, unit=None):
"""Convert to a Numpy datetime64 scalar or array
Args:
time: some python datetime or string in ISO 8601 format, could
also be a sequence of these to return a Numpy ndarray
        unit (str): one of {Y,M,W,D,h,m,s,ms,us,ns,ps,fs,as}
Returns:
np.datetime64[unit] or array with dtype np.datetime64[unit]
"""
if isinstance(time, np.ndarray):
time = time.astype(_format_unit(unit))
elif isinstance(time, (list, tuple)):
time = np.array([_as_datetime64_scalar(ti, unit=unit) for ti in time],
dtype=_format_unit(unit))
else:
time = _as_datetime64_scalar(time, unit=unit)
return time
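# Illustrative examples (assumed, not taken from the original source): strings,
# python datetimes, and sequences all coerce to numpy.datetime64 values.
#
#     as_datetime64('2010-06-21')
#         # -> numpy.datetime64('2010-06-21')
#     as_datetime64(['2010-06-21', '2010-06-22'], unit='s')
#         # -> ndarray with dtype 'datetime64[s]'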
def as_timedelta64(time, unit=None):
"""Convert to a timedelta64 type
Args:
time (timedelta-like): an int/float/string/... to convert
unit (None): This is the unit of the input, the result
will be the most coarse unit that can store the time
"""
# if isinstance(time, np.ndarray):
# time = time.astype(_format_unit(None, base=DELTA_BASE))
if isinstance(time, (np.ndarray, list, tuple)):
time = np.array([_as_timedelta64_scalar(ti, unit=unit) for ti in time])
else:
time = _as_timedelta64_scalar(time, unit=unit)
return time
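# Illustrative examples (assumed): "M:S" strings are folded into float seconds
# first, and floats are promoted to the finest unit that represents them
# exactly.
#
#     as_timedelta64('1:30')        # 1 minute 30 seconds
#         # -> numpy.timedelta64(90, 's')
#     as_timedelta64(1.5, unit='s')
#         # -> numpy.timedelta64(1500, 'ms')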
def as_datetime(time, unit=None):
"""Convert time to a Numpy ndarray of datetime.datetime objects
Args:
time: some python datetime or string in ISO 8601 format, could
also be a sequence of these to return a Numpy ndarray
        unit (str): one of {Y,M,W,D,h,m,s,ms,us,ns,ps,fs,as}
Returns:
np.ndarray of native datetime.datetime objects (dtype = object)
"""
try:
dt64 = as_datetime64(time, unit=unit)
except ValueError:
dt64 = as_datetime64(as_timedelta64(time, unit=unit))
return round_time(dt64, 'us').astype(datetime)
def as_timedelta(time, unit=None, allow0=True):
"""Convert time to a Numpy ndarray of datetime.datetime objects
Note:
Python timedelta objects are accurate up to microseconds
Args:
time: some python datetime or string in ISO 8601 format, could
also be a sequence of these to return a Numpy ndarray
        unit (str): one of {Y,M,W,D,h,m,s,ms,us,ns,ps,fs,as}
allow0 (bool): If False, then raise PrecisionError if a value
has been rounded to 0
Returns:
        np.ndarray of native datetime.timedelta objects (dtype = object)
"""
time = as_timedelta64(time, unit=unit)
ret = round_time(time, unit='us', allow0=allow0).astype(timedelta)
if not isinstance(ret, np.ndarray) and not hasattr(ret, "total_seconds"):
ret = TimeDeltaCompat(ret)
elif isinstance(ret, np.ndarray) and not hasattr(ret[0], "total_seconds"):
ret = np.array([TimeDeltaCompat(r) for r in ret])
return ret
def asarray_datetime64(arr, unit=None, conservative=False):
"""If is_valid_datetime64, then return a datetime64 array
Args:
arr (sequence): something that can become an arary
        unit (str): one of {Y,M,W,D,h,m,s,ms,us,ns,ps,fs,as}
conservative (bool): If True, then only turn arr into a
date-time array if it really looks like it
"""
if conservative:
if is_datetime_like(arr):
return as_datetime64(arr, unit=unit)
else:
return np.asarray(arr)
else:
try:
return as_datetime64(arr, unit=unit)
except ValueError:
return np.asarray(arr)
def linspace_datetime64(start, stop, n, endpoint=True, unit=None):
"""Make an evenly space ndarray from start to stop with n values"""
start = as_datetime64(start, unit=unit)
stop = as_datetime64(stop, unit=unit)
start, stop = regularize_time([start, stop], most_precise=True)
fltarr = np.linspace(start.astype('i8'), stop.astype('i8'), n,
endpoint=endpoint, dtype='f8')
return np.round(fltarr, 0).astype('i8').astype(start.dtype)
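# Illustrative example (assumed): endpoints may be strings or datetime64
#
#     linspace_datetime64('2008-01-01', '2008-01-02', 5)
#         # -> 5 evenly spaced datetime64 values from midnight Jan 1 to Jan 2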
def _most_precise_t_unit(tlst):
"""Find the most precise time unit from the bunch
Args:
tlst: datetime64 or timedelta64 instances
Returns:
str: unit of the most precise time
"""
units = [_get_base_unit(t)[1] for t in tlst]
unit_idx = [ORDERED_UNITS.index(u) for u in units]
return units[np.argmin(unit_idx)]
def _adjust_t_unit(t, unit, cfunc=None, allow0=True):
"""adjust the unit of t using cfunc
Args:
t (datetime64, timedelta64): time to convert
unit: target unit
cfunc (callable): one of `as_datetime64` or `as_timedelta64`
allow0 (bool): If False, then raise PrecisionError if a value
has been truncated to 0
Raises:
OverflowError: if all elements of tlst can't fit in the same
unit
PrecisionError: if rounding a time truncated it to 0 and not
`allow0`
"""
orig_base, orig_unit = _get_base_unit(t)
if cfunc is None:
cfunc_lookup = {'datetime64': as_datetime64,
'timedelta64': as_timedelta64}
cfunc = cfunc_lookup[orig_base]
if orig_unit == unit:
t1 = t
elif ORDERED_UNITS.index(unit) < ORDERED_UNITS.index(orig_unit):
# we want a more precise unit... raise an OverflowError if the
# new unit can not store the most coarse part of t
t1 = cfunc(t, unit=unit)
# converting back to orig_unit and checking t == t2 effectively
# checks to make sure we haven't overflowed into the sign bit
# since this isn't checked by Numpy internally. if the conversion
# overflowed a 64-bit int completely, an OverflowError has already
# been raised
t2 = cfunc(t1, unit=orig_unit)
if t != t2:
raise OverflowError("Time {0} could not be refined to unit '{1}' "
"because it overflowed into the sign bit ({2})."
"".format(str(t), unit, str(t1)))
else:
# we want a less precise unit, i.e., round t to the new unit... raise
# a PrecisionError if t was rounded to 0
t1 = cfunc(t, unit=unit)
if not allow0 and t1.astype('i8') == 0 and t.astype('i8') != 0:
raise PrecisionError("The time {0} was truncated to 0 when "
"rounded to the nearest '{1}'"
"".format(str(t), unit))
return t1
def round_time(tlst, unit, allow0=True):
"""Round a time or list of times to minimum level of coarseness
Note:
* When rounding, some values might be rounded to 0. If you
rather raise a PrecisionError, then give `allow0=False`.
Args:
tlst (timelike, list): single or list of datetime64 or
timedelta64
unit (str): units of result will be at least as coarse as
this unit
allow0 (bool): If False, then raise PrecisionError if a value
has been truncated to 0
Returns:
timelike or list: `tlst` rounded to a unit at least as coarse
as `unit`
"""
cfunc_lookup = {'datetime64': as_datetime64,
'timedelta64': as_timedelta64}
if not isinstance(tlst, (list, tuple, np.ndarray)):
tlst = [tlst]
single_val = True
else:
single_val = False
bases_units = [_get_base_unit(t) for t in tlst]
bases = [bu[0] for bu in bases_units]
units = [bu[1] for bu in bases_units]
unit_idxs = [ORDERED_UNITS.index(u) for u in units]
unit0_idx = ORDERED_UNITS.index(unit)
ret = []
for t, base, unit_idx in izip(tlst, bases, unit_idxs):
cfunc = cfunc_lookup[base]
if unit_idx >= unit0_idx:
ret.append(t)
else:
ret.append(_adjust_t_unit(t, unit, cfunc=cfunc, allow0=allow0))
if single_val:
return ret[0]
else:
if isinstance(tlst, np.ndarray):
return np.asarray(ret)
else:
return ret
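# Illustrative examples (assumed): rounding only coarsens, so values that are
# already coarser than `unit` pass through unchanged.
#
#     round_time(as_datetime64('2014-12-15T03:00:00.0003'), 's')
#         # -> numpy.datetime64('2014-12-15T03:00:00')
#     round_time(as_timedelta64(121, 'us'), 'ms')
#         # -> numpy.timedelta64(0, 'ms'); pass allow0=False to raise instead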
def regularize_time(tlst, unit=None, most_precise=False, allow_rounding=True,
allow0=True):
"""Convert a list of times to a common unit
Notes:
* If some times are too fine to fit in the same unit as the
rest of the times, then they will be rounded to a more
coarse unit. If you rather raise an OverflowError, then give
`allow_rounding=False`.
* When rounding, some values might be rounded to 0. If you
rather raise a PrecisionError, then give `allow0=False`.
Args:
tlst (timelike, list): single or list of datetime64 or
timedelta64
unit (str): If given, regularize all times to this unit,
otherwise, regularize them to the most precise of the
bunch
most_precise (bool): If True, then convert all times to the
most precise unit that fits all the times
allow_rounding (bool): if any time is too small to be
represented in the desired unit, then use a more coarse
unit and round values so that everything fits
allow0 (bool): If False, then raise PrecisionError if a value
has been rounded to 0
Returns:
timelike or list: single or list of times all in the same unit
"""
cfunc_lookup = {'datetime64': as_datetime64,
'timedelta64': as_timedelta64}
if not isinstance(tlst, (list, tuple, np.ndarray)):
tlst = [tlst]
single_val = True
else:
single_val = False
if unit is None:
unit = _most_precise_t_unit(tlst)
bases_units = [_get_base_unit(t) for t in tlst]
bases = [bu[0] for bu in bases_units]
cfuncs = [cfunc_lookup[b] for b in bases]
# round values to successively more coarse units until we get to
# a unit that can contain all the times in our list
for u in ORDERED_UNITS[ORDERED_UNITS.index(unit):]:
ret = []
try:
for t, cfunc in izip(tlst, cfuncs):
ret.append(_adjust_t_unit(t, u, cfunc, allow0=allow0))
except OverflowError:
if not allow_rounding:
raise
else:
unit = u
break
# if we want the most precise unit that fits everything, then keep
# refining the unit until we get an OverflowError
if most_precise:
for u in reversed(ORDERED_UNITS[:ORDERED_UNITS.index(unit)]):
try:
nxt = []
for t, cfunc in izip(ret, cfuncs):
nxt.append(_adjust_t_unit(t, u, cfunc, allow0=allow0))
except OverflowError:
break
else:
ret = nxt
if single_val:
return ret[0]
else:
if isinstance(tlst, np.ndarray):
return np.asarray(ret)
else:
return ret
def time_sum(t0, tdelta, unit=None, most_precise=False, allow_rounding=True,
allow0=True):
"""Add timedelta64 to datetime64 at highest precision w/o overflow
Notes:
* If `allow_rounding`, then the result may not be in `unit`. If
you rather raise an OverflowError, give `allow_rounding=False`
* If t0 can not be represented using the same units as tdelta,
then tdelta could be rounded to 0. If you rather raise a
PrecisionError, then give `allow0=False`
Args:
t0 (datetime64): starting date
tdelta (timedelta64): timedelta to add
unit (str): If given, regularize all times to this unit,
otherwise, regularize them to the most precise of the
bunch
most_precise (bool): If True, then convert all times to the
most precise unit that fits all the times
allow_rounding (bool): if tdelta is too small to be represented
in the same unit as t0, then round it to the finest unit
that fits both t0 and tdelta
allow0 (bool): If False, and a value is rounded to 0 in a given
unit, then raise a PrecisionError
Returns:
datetime64: t0 + tdelta
"""
t0 = as_datetime64(t0)
tdelta = as_timedelta64(tdelta)
t0, tdelta = regularize_time([t0, tdelta], unit=unit,
most_precise=most_precise,
allow_rounding=allow_rounding,
allow0=allow0)
return t0 + tdelta
def time_diff(t1, t2, unit=None, most_precise=False):
"""Diff two datetime64s at highest precision w/o overflow
Note:
If `allow_rounding`, then the result may not be in `unit`. If
you rather raise an OverflowError, give `allow_rounding=False`
Args:
t1 (datetime64): `t1` for `t1 - t2`
t2 (datetime64): `t2` for `t1 - t2`
unit (str): If given, regularize all times to this unit,
otherwise, regularize them to the most precise of the
bunch
most_precise (bool): If True, then convert all times to the
most precise unit that fits all the times
Returns:
timedelta64: t1 - t2
"""
t1 = as_datetime64(t1)
t2 = as_datetime64(t2)
t1, t2 = regularize_time([t1, t2], unit=unit, most_precise=most_precise)
return t1 - t2
def is_valid_datetime64(arr, unit=None):
"""Returns True iff arr can be made into a datetime64 array"""
try:
as_datetime64(arr, unit=unit)
return True
except ValueError:
return False
def is_valid_timedelta64(arr, unit=None):
"""Returns True iff arr can be made into a timedelta64 array"""
try:
as_timedelta64(arr, unit=unit)
return True
except ValueError:
return False
def datetime_as_seconds(a, decimals=0, unit=None):
"""round datetime a to the nearest decimals seconds"""
a_as_dt64 = as_datetime64(a, unit=unit)
_epoch = regularize_time(a_as_dt64, unit='s')
frac = time_diff(a_as_dt64, _epoch) / np.timedelta64(1, 's')
rounded = np.round(frac, decimals)
return as_datetime64(_epoch + as_timedelta64(rounded, unit='s'))
def timedelta_as_seconds(a, decimals=0, unit=None):
"""round timedelta a to the nearest decimals seconds
Note:
works for 'fs', but not 'as'
"""
a_td64 = as_timedelta64(a, unit=unit)
rounded = np.round(a_td64 / as_timedelta64(1, 's'), decimals)
return as_timedelta64(rounded, unit='s')
def time_as_seconds(a, decimals=0, unit=None):
"""round a to the nearest decimal seconds"""
if is_datetime_like(a):
return datetime_as_seconds(a, decimals=decimals, unit=unit)
elif is_timedelta_like(a, conservative=True):
return timedelta_as_seconds(a, decimals=decimals, unit=unit)
else:
return np.round(a, decimals=decimals)
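# Illustrative examples (assumed): dispatch depends on the argument type
#
#     time_as_seconds(as_datetime64('2010-06-21T12:00:00.4'))      # datetime path
#     time_as_seconds(as_timedelta64(1.25, unit='s'), decimals=1)  # timedelta path
#     time_as_seconds(3.14159, decimals=2)                         # plain numbers -> 3.14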
def format_datetime(time, fmt="%Y-%m-%d %H:%M:%S.%.02f"):
"""Shortcut to :py:func:`format_time` for a datetime format"""
return format_time(time, fmt=fmt)
def format_time(time, fmt='.02f', basetime=None):
"""Format time as a string
Args:
        time (float, datetime64, str): time to format
        fmt (str): for this method, can be::
----------------------- ------- ----------------------------
style time string
----------------------- ------- ----------------------------
'hms' 90015.0 "25:00:15"
'hmss' 90015.0 "25:00:15 (090015)"
'dhms' 900.0 "0 days 00:15:00"
'dhmss' 900.0 "0 days 00:15:00 (000900)"
'.02f' 900.0 '900.00'
'%Y-%m-%d %H:%M:%S' 900.0 '1970-01-01 00:15:00'
'%Y-%m-%d %H:%M:%S.%1f' 900.0 '1970-01-01 00:15:00.0'
----------------------- ------- ----------------------------
Note that the last one can involve any formatting strings
understood by datetime.strftime
basetime (np.datetime64): if formatting just number of seconds
from something like ".02f", then use this time as 0 seconds
Returns:
str
"""
dttime = as_datetime(time)
ret = ""
if basetime is None:
basetime = as_datetime64(0.0)
if fmt.lower() == 'ut':
fmt = '%Y-%m-%d %H:%M:%S'
if fmt in ('dhms', 'dhmss', 'hms', 'hmss'):
# These are special time-style formatters
if fmt.startswith('d'):
days = int(as_timedelta64(dttime) / np.timedelta64(1, 'D'))
if days == 1:
                days_str = '{0} day '.format(days)
else:
days_str = '{0} days '.format(days)
else:
days_str = ''
ret = datetime.strftime(dttime, days_str + '%H:%M:%S')
if fmt.endswith('ss'):
_tt = time_diff(dttime, basetime) / np.timedelta64(1, 's')
ret += " ({0:06d})".format(int(_tt))
elif '%' not in fmt:
# if there's no % symbol, then it's probably not a strftime format,
# so use fmt as normal string formatting of total_seconds
_tt = (as_datetime64(time) - basetime) / np.timedelta64(1, 's')
ret = "{0:{1}}".format(_tt, fmt.strip())
else:
if not fmt:
msec_fmt = ['1']
fmt = "%Y-%m-%d %H:%M:%S.%f"
else:
msec_fmt = re.findall(r"%\.?([0-9]*)f", fmt)
fmt = re.sub(r"%\.?([0-9]*)f", "%f", fmt)
tstr = datetime.strftime(dttime, fmt)
# now go back and for any %f -> [0-9]{6}, reformat the precision
it = list(izip(msec_fmt, re.finditer("[0-9]{6}", tstr)))
for ffmt, m in reversed(it):
a, b = m.span()
val = float("0." + tstr[a:b])
ifmt = int(ffmt) if len(ffmt) > 0 else 6
f = "{0:0.{1}f}".format(val, ifmt)[2:]
tstr = tstr[:a] + f + tstr[b:]
ret = tstr
return ret
def _check_like(val, _np_types, _native_types, check_str=None): # pylint: disable=too-many-return-statements
"""
    Checks the following:
- if val is instance of _np_types or _native_types
- if val is a list or ndarray of _np_types or _native_types
- if val is a string or list of strings that can be parsed by check_str
Does not check:
- if val is an ndarray of strings that can be parsed by check_str
"""
_all_types = _np_types + _native_types
if isinstance(val, _all_types):
return True
elif isinstance(val, string_types):
return check_str and check_str(val)
elif isinstance(val, (list, tuple)):
for v in val:
if isinstance(v, string_types):
if check_str and check_str(v):
continue
if not isinstance(v, _all_types):
return False
return True
elif hasattr(val, 'dtype'):
if val.dtype == np.object:
return all(isinstance(v, _native_types) for v in val)
else:
return val.dtype.type in _np_types
else:
return False
def datetime64_as_years(time):
"""Get time as floating point years since the year 0"""
time = as_datetime64(time)
epoch_year = 1970
epoch = as_datetime64("{0}-01-01T00:00:00.0".format(epoch_year))
tdelta = time_diff(time, epoch, most_precise=True)
years = tdelta / np.timedelta64(1, 'D') / 365.242 + epoch_year
return years
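# Illustrative examples (assumed):
#
#     datetime64_as_years('1970-01-01')  # -> 1970.0 (the epoch, zero offset)
#     datetime64_as_years('1970-07-02')  # -> roughly 1970.5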
def is_datetime_like(val, conservative=False): # pylint: disable=unused-argument
"""Returns True iff val is datetime-like"""
if conservative and val is None:
return False
if conservative:
try:
int(val)
return False
except (ValueError, TypeError):
pass
return _check_like(val, (np.datetime64, ), (datetime, ),
is_valid_datetime64)
def is_timedelta_like(val, conservative=False):
"""Returns True iff val is timedelta-like"""
if conservative:
if val is None:
return False
if isinstance(val, string_types):
try:
int(val)
return False
except (ValueError, TypeError):
pass
return _check_like(val, (np.timedelta64, ), (timedelta, ),
is_valid_timedelta64)
else:
return _check_like(val, (np.timedelta64, np.floating, np.integer),
(timedelta, float, int), is_valid_timedelta64)
def is_time_like(val, conservative=False):
"""Returns True iff val is datetime-like or timedelta-like"""
return (is_datetime_like(val, conservative=conservative) or
is_timedelta_like(val, conservative=conservative))
to_datetime64 = as_datetime64
to_timedelta64 = as_timedelta64
to_datetime = as_datetime
to_timedelta = as_timedelta
to_isotime = as_isotime
def _main():
verb = True
d0 = as_datetime64('2010-06-21')
d1 = as_datetime64('2014-12-15T03:00:00.0003')
d2 = as_datetime64('1970-01-01', 'as')
t0 = as_timedelta64(60, 'm')
t1 = as_timedelta64(121, 'us')
t2 = as_timedelta64(1536, 'as')
l0 = [d0, d1, t0, t1, t2]
if verb:
print("l0", l0, "\n")
#
# TEST `round_time` and `regularize_time`
#
l1 = regularize_time(l0, unit='us')
if verb:
print("l1", l1, "\n")
l2 = round_time(l0, 'us')
if verb:
print("l1", l2, "\n")
l3 = round_time(l1, 's')
if verb:
print("l3", l3, "\n")
l4 = round_time(l0, 'fs', allow0=False)
if verb:
print("l4", l4, "\n")
assert l1 == l2
assert l1 != l3
try:
_ = regularize_time(l0, unit='us', allow0=False)
except PrecisionError:
pass
else:
assert 0, "rounding 1536 atto secs -> us should have caused an error"
try:
_ = regularize_time(l0, allow_rounding=False)
except OverflowError:
pass
else:
assert 0, "2010-06-21 should not be representable in atto secs"
try:
_ = round_time(l0, 's', allow0=False)
except PrecisionError:
pass
else:
assert 0, "rounding 1536 atto secs -> secs should have caused an error"
#
# TEST `time_sum`
#
print(d0, "+", t1, "=", time_sum(d0, t1))
print(d0, "+", t2, "=", time_sum(d0, t2))
try:
time_sum(d0, t2, allow0=False)
except PrecisionError:
pass
else:
assert 0, "rounding 1536 atto secs -> us should have caused an error"
try:
time_sum(d0, t2, allow_rounding=False)
except OverflowError:
pass
else:
assert 0, "2010-06-21 should not be representable in atto secs"
#
# TEST `time_diff`
#
print(d0, "-", d1, "=", time_diff(d0, d1))
print(d0, "-", d1, "=", time_diff(d0, d1, most_precise=True))
print(d0, "-", d1, "=", time_diff(d0, d1, unit='s'))
print(d0, "-", d2, "=", time_diff(d0, d2))
print(d0, "-", d2, "=", time_diff(d0, d2, unit='s'))
print(d0, "-", d2, "=", time_diff(d0, d2, unit='Y'))
#
# TEST linspace_datetime64
#
lst = linspace_datetime64(as_datetime64('1930-03-04'),
as_datetime64('2010-02-14T12:30:00'),
10)
print(lst, lst.dtype)
if __name__ == "__main__":
sys.exit(_main())
##
## EOF
##
```
#### File: Viscid/viscid/parallel.py
```python
from __future__ import print_function, division
from math import ceil
import threading
import multiprocessing as mp
import multiprocessing.pool
from contextlib import closing
from itertools import repeat
import sys
import numpy as np
import viscid
from viscid.compat import izip, futures, string_types
__all__ = ["chunk_list", "chunk_slices", "chunk_interslices", "chunk_sizes",
"map", "map_async"]
# Non daemonic processes are probably a really bad idea
class NoDaemonProcess(mp.Process):
"""Using this is probably a bad idea"""
# make 'daemon' attribute always return False
@staticmethod
def _get_daemon():
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
class NoDaemonPool(multiprocessing.pool.Pool): # pylint: disable=W0223
""" I am vulnerable to armies of undead worker processes, chances
are you don't actually want to use me
"""
Process = NoDaemonProcess
class _MapThread(threading.Thread):
def __init__(self, result_container, index, **kwargs):
self.results = result_container
self.index = index
self.target = kwargs.pop("target")
self.args = kwargs.pop("args", [])
self.kwargs = kwargs.pop("kwargs", {})
super(_MapThread, self).__init__(**kwargs)
def run(self):
self.results[self.index] = self.target(*self.args, **self.kwargs)
def chunk_list(seq, nchunks, size=None):
"""Chunk a list
slice seq into chunks of nchunks size, seq can be a anything
sliceable such as lists, numpy arrays, etc. These chunks will be
'contiguous', see :meth:`chunk_interslice` for picking every nth
element.
Parameters:
size: if given, set nchunks such that chunks have about 'size'
elements
Returns:
nchunks slices of length N = (len(lst) // nchunks) or N - 1
See Also:
Use :meth:`chunk_iterator` to chunk up iterators
Example:
>>> it1, it2, it3 = chunk_list(range(8), 3)
>>> it1 == range(0, 3) # 3 vals
True
>>> it2 == range(3, 6) # 3 vals
True
>>> it3 == range(6, 8) # 2 vals
True
"""
nel = len(seq)
    if size is not None:
        nchunks = int(ceil(nel / size))
ret = chunk_slices(nel, nchunks)
for i in range(nchunks):
ret[i] = seq[slice(*ret[i])]
return ret
def chunk_slices(nel, nchunks, size=None):
r"""Make continuous chunks
Get the slice info (can be unpacked and passed to the slice builtin
as in slice(\*ret[i])) for nchunks contiguous chunks in a list with
nel elements
Parameters:
nel: how many elements are in one pass of the original list
nchunks: how many chunks to make
size: if given, set nchunks such that chunks have about 'size'
elements
Returns:
a list of (start, stop) tuples with length nchunks
Example:
>>> sl1, sl2 = chunk_slices(5, 2)
>>> sl1 == (0, 3) # 3 vals
True
>>> sl2 == (3, 5) # 2 vals
True
"""
    if size is not None:
        nchunks = int(ceil(nel / size))
    nlong = nel % nchunks  # nlong guaranteed < nchunks
lenshort = nel // nchunks
lenlong = lenshort + 1
ret = [None] * nchunks
start = 0
for i in range(nlong):
ret[i] = (start, start + lenlong)
start += lenlong
for i in range(nlong, nchunks):
ret[i] = (start, start + lenshort)
start += lenshort
return ret
def chunk_interslices(nchunks):
"""Make staggered chunks
Similar to chunk_slices, but pick every nth element instead of
getting a contiguous patch for each chunk
Parameters:
nchunks: how many chunks to make
Returns:
a list of (start, stop, step) tuples with length nchunks
Example:
>>> chunk_slices(2) == [(0, None, 2), (1, None, 2)]
True
"""
ret = [None] * nchunks
for i in range(nchunks):
ret[i] = (i, None, nchunks)
return ret
def chunk_sizes(nel, nchunks, size=None):
"""For chunking up lists, how big is each chunk
Parameters:
nel: how many elements are in one pass of the original list
        nchunks: how many chunks to make
size: if given, set nchunks such that chunks have about 'size'
elements
Returns:
an ndarray of the number of elements in each chunk, this
should be the same for chunk_list, chunk_slices and
chunk_interslices
Example:
>>> nel1, nel2 = chunk_sizes(5, 2)
>>> nel1 == 2
True
>>> nel2 == 3
True
"""
    if size is not None:
        nchunks = int(ceil(nel / size))
    nlong = nel % nchunks  # nlong guaranteed < nchunks
lenshort = nel // nchunks
lenlong = lenshort + 1
ret = np.empty((nchunks,), dtype="int")
ret[:nlong] = lenlong
ret[nlong:] = lenshort
return ret
def _star_passthrough(args):
""" this is so we can give a zipped iterable to func """
# args[0] is function, args[1] is positional args, and args[2] is kwargs
return args[0](*(args[1]), **(args[2]))
def sanitize_nr_procs(nr_procs):
if isinstance(nr_procs, string_types):
nr_procs = nr_procs.strip().lower()
if nr_procs == "all" or nr_procs == "auto":
nr_procs = mp.cpu_count()
return int(nr_procs)
def map(nr_procs, func, args_iter, args_kw=None, timeout=1e8,
daemonic=True, threads=False, pool=None, force_subprocess=False):
"""Just like ``subprocessing.map``?
same as :meth:`map_async`, except it waits for the result to
be ready and returns it
Note:
When using threads, this is WAY faster than map_async since
map_async uses the builtin python ThreadPool. I have no idea
why that's slower than making threads by hand.
"""
nr_procs = sanitize_nr_procs(nr_procs)
if args_kw is None:
args_kw = {}
# don't waste time spinning up a new process
if threads:
args = [(func, ai, args_kw) for ai in args_iter]
with futures.ThreadPoolExecutor(max_workers=nr_procs) as executor:
ret = [val for val in executor.map(_star_passthrough, args)]
elif pool is None and nr_procs == 1 and not force_subprocess:
args_iter = izip(repeat(func), args_iter, repeat(args_kw))
ret = [_star_passthrough(args) for args in args_iter]
else:
p, r = map_async(nr_procs, func, args_iter, args_kw=args_kw,
daemonic=daemonic, threads=threads, pool=pool)
ret = r.get(int(timeout))
# in principle this join should return almost immediately since
# we already called r.get
p.join()
return ret
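# Illustrative usage (assumed): each element of args_iter is unpacked as the
# positional arguments of one call to func.
#
#     viscid.parallel.map(2, pow, [(2, 3), (3, 2)])      # -> [8, 9]
#     viscid.parallel.map("all", pow, [(2, 3)], threads=True)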
def map_async(nr_procs, func, args_iter, args_kw=None, daemonic=True,
threads=False, pool=None):
"""Wrap python's ``map_async``
This has some utility stuff like star passthrough
Run func on nr_procs with arguments given by args_iter. args_iter
should be an iterable of the list of arguments that can be unpacked
for each invocation. kwargs are passed to func as keyword arguments
Returns:
(tuple) (pool, multiprocessing.pool.AsyncResult)
Note:
When using threads, this is WAY slower than map since
map_async uses the builtin python ThreadPool. I have no idea
why that's slower than making threads by hand.
Note: daemonic can be set to False if one needs to spawn child
processes in func, BUT this could be vulnerable to creating
an undead army of worker processes, only use this if you
really really need it, and know what you're doing
Example:
>>> func = lambda i, letter: print i, letter
>>> p, r = map_async(2, func, itertools.izip(itertools.count(), 'abc'))
>>> r.get(1e8)
>>> p.join()
>>> # the following is printed from 2 processes
0 a
1 b
2 c
"""
nr_procs = sanitize_nr_procs(nr_procs)
if args_kw is None:
args_kw = {}
if not threads and sys.platform == 'darwin' and ("mayavi.mlab" in sys.modules or
"mayavi" in sys.modules):
import mayavi
if mayavi.ETSConfig.toolkit == 'qt4':
viscid.logger.critical("Using multiprocessing with Mayavi + Qt4 "
"will cause segfaults on join.\n"
"A workaround is to use the wx backend "
"(`os.environ['ETS_TOOLKIT'] = 'wx'`).")
args_iter = izip(repeat(func), args_iter, repeat(args_kw))
# if given a pool, don't close it when we're done delegating tasks
if pool is not None:
return pool, pool.map_async(_star_passthrough, args_iter)
else:
if threads:
pool = mp.pool.ThreadPool(nr_procs)
elif daemonic:
pool = mp.Pool(nr_procs)
else:
pool = NoDaemonPool(nr_procs)
with closing(pool) as p:
return p, p.map_async(_star_passthrough, args_iter)
##
## EOF
##
```
#### File: viscid/plot/vlab.py
```python
from __future__ import print_function, division
import os
import sys
import numpy as np
import mayavi
from mayavi import mlab
from mayavi.modules.axes import Axes
from mayavi.sources.builtin_surface import BuiltinSurface
from mayavi.sources.vtk_data_source import VTKDataSource
from traits.trait_errors import TraitError
from tvtk.api import tvtk
import viscid
from viscid import field
def add_source(src, figure=None):
"""Add a vtk data source to a figure
Args:
src (VTKDataSource): Description
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
Returns:
None
"""
if not figure:
figure = mlab.gcf()
if src not in figure.children:
engine = figure.parent
engine.add_source(src, scene=figure)
return src
def add_lines(lines, scalars=None, figure=None, name="NoName"):
"""Add list of lines to a figure
Args:
lines (list): See :py:func:`lines2source`
scalars (ndarray): See :py:func:`lines2source`
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
name (str): name of vtk object
Returns:
:py:class:`mayavi.sources.vtk_data_source.VTKDataSource`
"""
src = lines2source(lines, scalars=scalars, name=name)
add_source(src, figure=figure)
return src
def add_field(fld, figure=None, center="", name=""):
"""Add a Viscid Field to a mayavi figure
Args:
fld (Field): Some Viscid Field
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
center (str): 'cell' or 'node', leave blank to use fld.center
        name (str): name of vtk object, leave blank for fld.name
Returns:
:py:class:`mayavi.sources.vtk_data_source.VTKDataSource`
"""
src = field2source(fld, center=center, name=name)
add_source(src, figure=figure)
return src
def points2source(vertices, scalars=None, name="NoName"):
# if scalars:
# scalars = [scalars]
verts, scalars, _, other = viscid.vutil.prepare_lines([vertices], scalars)
src = mlab.pipeline.scalar_scatter(verts[0], verts[1], verts[2])
if scalars is not None:
if scalars.dtype == np.dtype('u1'):
sc = tvtk.UnsignedCharArray()
sc.from_array(scalars.T)
scalars = sc
src.mlab_source.dataset.point_data.scalars = scalars
src.mlab_source.dataset.modified()
src.name = name
return src
def lines2source(lines, scalars=None, name="NoName"):
"""Turn a list of lines as ndarrays into vtk data source
Args:
lines (list): List of 3xN, 4xN, 6xN ndarrays of xyz, xyzs, or
xyzrgb data for N points along the line. N need not be the
same for all lines.
scalars (ndarray, list): Scalars for each point, or each line.
See :py:func:`viscid.vutil.prepare_lines` for more details
name (str): name of vtk object
Returns:
:py:class:`mayavi.sources.vtk_data_source.VTKDataSource`
See Also:
* :py:func:`viscid.vutil.prepare_lines`
"""
r = viscid.vutil.prepare_lines(lines, scalars, do_connections=True)
lines, scalars, connections, other = r
src = mlab.pipeline.scalar_scatter(lines[0], lines[1], lines[2])
if scalars is not None:
if scalars.dtype == np.dtype('u1'):
sc = tvtk.UnsignedCharArray()
sc.from_array(scalars.T)
scalars = sc
src.mlab_source.dataset.point_data.scalars = scalars
src.mlab_source.dataset.modified()
src.mlab_source.dataset.lines = connections
src.name = name
return src
def field2source(fld, center=None, name=None):
"""Convert a field to a vtk data source
This dispatches to either :meth:`field_to_point_source` or
:meth:`field_to_cell_source` depending on the centering of
`fld`.
Parameters:
fld: field to convert
center (str): Either "cell", "node", or "" to use the
same centering as fld
name (str): Add specific name. Leave as "" to use fld.name
Returns:
mayavi source
Raises:
NotImplementedError: If center (or fld.center) is not
recognized
"""
if not center:
center = fld.center
center = center.lower()
if center == "node":
src = field2point_source(fld, name=name)
elif center == "cell":
src = field2cell_source(fld, name=name)
else:
raise NotImplementedError("cell / node only for now")
return src
def field2point_source(fld, name=None):
"""Convert a field to a vtk point data source"""
grid, arr = _prep_field(fld)
dat_target = grid.point_data
if fld.iscentered("Cell"):
grid.dimensions = tuple(fld.crds.shape_cc)
grid.x_coordinates = fld.get_crd_cc(0) # ('x')
grid.y_coordinates = fld.get_crd_cc(1) # ('y')
grid.z_coordinates = fld.get_crd_cc(2) # ('z')
elif fld.iscentered("Node"):
grid.dimensions = tuple(fld.crds.shape_nc)
grid.x_coordinates = fld.get_crd_nc(0) # ('x')
grid.y_coordinates = fld.get_crd_nc(1) # ('y')
grid.z_coordinates = fld.get_crd_nc(2) # ('z')
else:
raise ValueError("cell or node only please")
src = _finalize_source(fld, arr, grid, dat_target)
if name:
src.name = name
return src
def field2cell_source(fld, name=None):
"""Convert a field to a vtk cell data source"""
grid, arr = _prep_field(fld)
dat_target = grid.cell_data
if fld.iscentered("Cell"):
grid.dimensions = tuple(fld.crds.shape_nc)
grid.x_coordinates = fld.get_crd_nc(0) # ('x')
grid.y_coordinates = fld.get_crd_nc(1) # ('y')
grid.z_coordinates = fld.get_crd_nc(2) # ('z')
elif fld.iscentered("Node"):
raise NotImplementedError("can't do lossless cell data from nodes yet")
else:
raise ValueError("cell or node only please")
src = _finalize_source(fld, arr, grid, dat_target)
if name:
src.name = name
return src
def _prep_field(fld):
grid = tvtk.RectilinearGrid()
# note, the transpose operations are b/c fld.data is now xyz ordered,
# but vtk expects zyx data
if isinstance(fld, field.ScalarField):
zyx_dat = fld.data.T
arr = np.reshape(zyx_dat, (-1,))
# vtk expects zyx data, but fld.data is now xyz
elif isinstance(fld, field.VectorField):
if fld.layout == field.LAYOUT_INTERLACED:
zyx_dat = np.transpose(fld.data, (2, 1, 0, 3))
arr = np.reshape(zyx_dat, (-1, 3))
elif fld.layout == field.LAYOUT_FLAT:
zyx_dat = np.transpose(fld.data, (0, 3, 2, 1))
arr = np.reshape(np.rollaxis(zyx_dat, 0, len(fld.shape)), (-1, 3))
else:
raise ValueError()
else:
raise ValueError("Unexpected fld type: {0}".format(type(fld)))
# swap endian if needed
if str(arr.dtype).startswith(">"):
arr = arr.byteswap().newbyteorder()
return grid, arr
def _finalize_source(fld, arr, grid, dat_target):
if isinstance(fld, field.ScalarField):
dat_target.scalars = arr
dat_target.scalars.name = fld.name
elif isinstance(fld, field.VectorField):
dat_target.vectors = arr
dat_target.vectors.name = fld.name
src = VTKDataSource(data=grid)
src.name = fld.name
return src
def _prep_vector_source(v_src, scalars):
"""Side-effect: v_src will be modified if scalars are given"""
if isinstance(v_src, viscid.field.Field):
v_src = field2source(v_src, center='node')
if scalars is not None:
if isinstance(scalars, viscid.field.Field):
scalars = field2source(scalars, center='node')
v_src._point_scalars_list.append(scalars.name) # pylint: disable=protected-access
v_src.data.point_data.scalars = scalars.data.point_data.scalars
v_src.point_scalars_name = scalars.name
return v_src, scalars
def scalar_cut_plane(src, center=None, **kwargs):
"""Wraps `mayavi.mlab.pipeline.scalar_cut_plane`
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
If you call this multiple times with the same
`viscid.field.Field`, you should consider using field2source
yourself and passing the Mayavi source object
Args:
src (Mayavi Source or ScalarField): If src is a ScalarField,
then the field is wrapped into a Mayavi Source and added
to the figure
center (str): centering for the Mayavi source, 'cell' will
make the grid visible, while 'node' will interpolate
between points
**kwargs: Passed to `mayavi.mlab.pipeline.scalar_cut_plane`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.scalar_cut_plane.ScalarCutPlane`
"""
if isinstance(src, viscid.field.Field):
src = field2source(src, center=center)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
scp = mlab.pipeline.scalar_cut_plane(src, **kwargs)
apply_cmap(scp, **cmap_kwargs)
return scp
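# Illustrative usage sketch (assumed file / field names; requires an open
# Mayavi scene). `plane_orientation` is forwarded to
# `mayavi.mlab.pipeline.scalar_cut_plane`:
#
#     # f = viscid.load_file("some_run.3d.xdmf").get_grid()
#     # scp = scalar_cut_plane(f["pp"], plane_orientation="z_axes", cmap="viridis")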
def vector_cut_plane(v_src, scalars=None, color_mode='vector', **kwargs):
"""Wraps `mayavi.mlab.pipeline.vector_cut_plane`
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
If you call this multiple times with the same
`viscid.field.Field`, you should consider using field2source
yourself and passing the Mayavi source object
Args:
v_src (Mayavi Source, or VectorField): Vector to cut-plane. If
a Mayavi Source, then it must be node centered.
scalars (Mayavi Source, or ScalarField): Optional scalar data.
If a Mayavi Source, then it must be node centered. This
will enable scale_mode and color_mode by 'scalar'
color_mode (str): Color by 'vector', 'scalar', or 'none'
**kwargs: Passed to `mayavi.mlab.pipeline.vector_cut_plane`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.vector_cut_plane.VectorCutPlane`
"""
v_src, scalars = _prep_vector_source(v_src, scalars)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
vcp = mlab.pipeline.vector_cut_plane(v_src, **kwargs)
apply_cmap(vcp, mode='vector', **cmap_kwargs)
apply_cmap(vcp, mode='scalar', **cmap_kwargs)
vcp.glyph.color_mode = 'color_by_{0}'.format(color_mode.strip().lower())
return vcp
def mesh_from_seeds(seeds, scalars=None, **kwargs):
"""Wraps `mayavi.mlab.mesh` for Viscid seed generators
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
Args:
seeds (Viscid.SeedGen): Some seed generator with a 2D mesh
representation
scalars (ndarray, ScalarField): data mapped onto the mesh,
i.e., the result of viscid.interp_trilin(seeds, ...)
**kwargs: Passed to `mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.surface.Surface`
"""
if scalars is not None:
vertices, scalars = seeds.wrap_mesh(scalars)
else:
vertices, = seeds.wrap_mesh()
return mesh(vertices[0], vertices[1], vertices[2], scalars=scalars,
**kwargs)
def mesh(x, y, z, scalars=None, **kwargs):
"""Wraps `mayavi.mlab.mesh`
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
Args:
        x (ndarray): 2D array of vertices' x-values
        y (ndarray): 2D array of vertices' y-values
        z (ndarray): 2D array of vertices' z-values
scalars (ndarray, ScalarField): optional scalar data
**kwargs: Passed to `mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.surface.Surface`
"""
if scalars is not None:
if isinstance(scalars, viscid.field.Field):
scalars = scalars.data
scalars = scalars.reshape(x.shape)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
m = mlab.mesh(x, y, z, scalars=scalars, **kwargs)
if scalars is not None:
apply_cmap(m, **cmap_kwargs)
return m
def quiver3d(*args, **kwargs):
"""Wraps `mayavi.mlab.quiver3d`
Args:
*args: passed to `mayavi.mlab.quiver3d`
**kwargs: Other Arguments are popped, then kwargs is passed to
`mayavi.mlab.quiver3d`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
    Returns:
        the glyphs created by `mayavi.mlab.quiver3d`
"""
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
quivers = mlab.quiver3d(*args, **kwargs)
apply_cmap(quivers, mode='scalar', **cmap_kwargs)
apply_cmap(quivers, mode='vector', **cmap_kwargs)
return quivers
def points3d(*args, **kwargs):
"""Wraps `mayavi.mlab.points3d`
Args:
*args: passed to `mayavi.mlab.points3d`
**kwargs: Other Arguments are popped, then kwargs is passed to
`mayavi.mlab.points3d`
Keyword Arguments:
modify_args (bool): if True (default), then check if args is a
single 2d sequence of shape 3xN or Nx3. Then split them up
            appropriately. If False, then args are passed through
            to mlab.points3d unchanged, no matter what.
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
    Returns:
        the glyphs created by `mayavi.mlab.points3d`
"""
modify_args = kwargs.pop('modify_args', True)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
if modify_args and len(args) < 3:
a0 = np.asarray(args[0])
if len(a0.shape) > 1 and a0.shape[0] == 3:
args = [a0[0, :].reshape(-1),
a0[1, :].reshape(-1),
a0[2, :].reshape(-1)] + list(args[1:])
elif len(a0.shape) > 1 and a0.shape[1] == 3:
args = [a0[:, 0].reshape(-1),
a0[:, 1].reshape(-1),
a0[:, 2].reshape(-1)] + list(args[1:])
points = mlab.points3d(*args, **kwargs)
apply_cmap(points, **cmap_kwargs)
return points
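# Illustrative example (assumed): a single 3xN (or Nx3) array is split into
# x, y, z components automatically unless modify_args=False.
#
#     # pts = np.random.rand(3, 100)
#     # points3d(pts, scale_factor=0.05, cmap="inferno")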
def streamline(v_src, scalars=None, **kwargs):
"""Wraps `mayavi.mlab.pipeline.streamline`; mind the caveats
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
Side-effect: If scalars are given, then v_src is modified to
point to the scalar data!
If v_src and scalars are Mayavi sources, they must be node
centered.
If you call this multiple times with the same v_src and
scalars, you should consider using field2source yourself and
passing the Mayavi source objects, unless you're using
different scalars with the same vector field, since this
        function has side-effects on the vector source.
Args:
v_src (Mayavi Source, or VectorField): Vector to streamline. If
a Mayavi Source, then it must be node centered.
scalars (Mayavi Source, or ScalarField): Optional scalar data.
If a Mayavi Source, then it must be node centered.
**kwargs: Passed to `mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.streamline.Streamline`
"""
v_src, scalars = _prep_vector_source(v_src, scalars)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
sl = mlab.pipeline.streamline(v_src, **kwargs)
apply_cmap(sl, mode='vector', **cmap_kwargs)
apply_cmap(sl, mode='scalar', **cmap_kwargs)
return sl
def iso_surface(src, backface_culling=True, **kwargs):
"""Wraps `mayavi.mlab.pipeline.iso_surface`; mind the caveats
Note that backfaces are culled by default.
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
If src is a Mayavi source, it must be node centered.
If you call this multiple times with the same
`viscid.field.Field`, you should consider using field2source
yourself and passing the Mayavi source object
Args:
src (Mayavi Source or ScalarField): If src is a ScalarField,
then the field is wrapped into a Mayavi Source and added
to the figure. If a Mayavi Source, then it must be node
centered.
backface_culling (bool): Cull backfaces by default. Useful for
translucent surfaces.
**kwargs: Passed to `mayavi.mlab.pipeline.scalar_cut_plane`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.iso_surface.IsoSurface`
"""
if isinstance(src, viscid.field.Field):
src = field2source(src, center='node')
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
iso = mlab.pipeline.iso_surface(src, **kwargs)
apply_cmap(iso, **cmap_kwargs)
iso.actor.property.backface_culling = backface_culling
return iso
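# Illustrative usage sketch (assumed field name); `contours` and `opacity` are
# forwarded to `mayavi.mlab.pipeline.iso_surface`:
#
#     # iso = iso_surface(f["rr"], contours=[5.0], opacity=0.5, cmap="plasma")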
def plot_line(line, scalars=None, **kwargs):
"""Wrap :py:func:`plot_lines` for a single line"""
if scalars is not None:
scalars = [scalars]
return plot_lines([line], scalars=scalars, **kwargs)
def plot_lines(lines, scalars=None, style="tube", figure=None,
name="Lines", tube_radius=0.05, tube_sides=6, **kwargs):
"""Make 3D mayavi plot of lines
Scalars can be a bunch of single values, or a bunch of rgb data
to set the color of each line / vertex explicitly. This is
explained in :py:func:`lines2source`.
Example:
A common use case of setting the line color from a topology
will want to use :py:func:`viscid.topology2color`::
>>> import viscid
>>> from viscid.plot import vlab
>>>
>>> B = viscid.make_dipole()
>>> seeds = viscid.Line([-4, 0, 0], [4, 0, 0])
>>> lines, topology = viscid.calc_streamlines(B, seeds,
>>> ibound=0.05)
>>> scalars = viscid.topology2color(topology)
>>> vlab.plot_lines(lines, scalars, tube_radius=0.02)
>>> vlab.savefig("dipole.x3d")
>>> viscid.meshlab_convert("dipole.x3d", "dae")
>>> vlab.show()
Parameters:
lines (list): See :py:func:`lines2source`
        scalars (ndarray, list): See :py:func:`lines2source`
style (str): 'tube' or 'none'
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
        name (str): name of the vtk object
tube_radius (float): Radius if style == 'tube'
tube_sides (int): Angular resolution if style == 'tube'
**kwargs: passed to :meth:`mayavi.mlab.pipeline.surface`. This
is useful for setting a colormap among other things.
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
Mayavi surface module
Raises:
ValueError: if style is neither tube nor strip
"""
style = style.lower()
if not figure:
figure = mlab.gcf()
src = lines2source(lines, scalars=scalars, name=name)
# always use the stripper since actually turns a collection of line
# segments into a line... that way capping will cap lines, not line
# segments, etc.
lines = mlab.pipeline.stripper(src, figure=figure)
if style == "tube":
lines = mlab.pipeline.tube(lines, figure=figure, tube_radius=tube_radius,
tube_sides=tube_sides)
elif style == "none" or not style:
pass
else:
raise ValueError("Unknown style for lines: {0}".format(style))
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
surface = mlab.pipeline.surface(lines, **kwargs)
apply_cmap(surface, **cmap_kwargs)
return surface
def plot_ionosphere(fld, radius=1.063, figure=None, bounding_lat=0.0,
rotate=None, crd_system="gse", **kwargs):
"""Plot an ionospheric field
Args:
        fld (Field): Some spherical (phi, theta) / (lon, lat) field
radius (float): Defaults to 1Re + 400km == 1.063Re
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
bounding_lat (float): Description
rotate (None, sequence, str, datetime64): sequence of length 4
that contains (angle, ux, uy, uz) for the angle and axis of
a rotation, or a UT time as string or datetime64 to rotate
earth to a specific date/time, or a cotr object in
conjunction with crd_system
crd_system (str, other): Used if rotate is datetime-like. Can
be one of ('gse', 'mhd'), or anything that returns from
:py:func:`viscid.as_crd_system`.
**kwargs: passed to :py:func:`mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
"""
if figure is None:
figure = mlab.gcf()
fld = viscid.as_spherefield(fld, order=('phi', 'theta'), units='deg')
phil, thetal = fld.xl
phih, thetah = fld.xh
nphi, ntheta = fld.shape
sphere = viscid.Sphere([0, 0, 0], r=radius, ntheta=ntheta, nphi=nphi,
thetalim=(thetal, thetah), philim=(phil, phih),
theta_phi=False)
verts, arr = sphere.wrap_mesh(fld.data)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
if 'name' not in kwargs:
kwargs['name'] = fld.name
m = mlab.mesh(verts[0], verts[1], verts[2], scalars=arr, figure=figure,
**kwargs)
if bounding_lat:
rp = 1.5 * radius
z = radius * np.cos((np.pi / 180.0) * bounding_lat)
clip = mlab.pipeline.data_set_clipper(m.module_manager.parent)
clip.widget.widget.place_widget(-rp, rp, -rp, rp, -z, z)
clip.update_pipeline()
clip.widget.widget.enabled = False
insert_filter(clip, m.module_manager)
# m.module_manager.parent.parent.filter.auto_orient_normals = True
else:
pass
# m.module_manager.parent.filter.auto_orient_normals = True
m.actor.mapper.interpolate_scalars_before_mapping = True
apply_cmap(m, **cmap_kwargs)
m.actor.actor.rotate_z(180)
_apply_rotation(m, 'sm', rotate, crd_system=crd_system)
return m
def plot_nulls(nulls, Acolor=(0.0, 0.263, 0.345), Bcolor=(0.686, 0.314, 0.0),
Ocolor=(0.239, 0.659, 0.557), **kwargs):
kwargs.setdefault('scale_mode', 'none')
kwargs.setdefault('scale_factor', 0.3)
if not isinstance(nulls, dict):
empty = np.ones((3, 0))
nulls = dict(O=[empty, nulls], A=[empty, empty], B=[empty, empty])
Opts = nulls['O'][1]
if Ocolor is not None and Opts.shape[1]:
mlab.points3d(Opts[0], Opts[1], Opts[2], color=Ocolor, name="Onulls",
**kwargs)
Apts = nulls['A'][1]
    if Acolor is not None and Apts.shape[1]:
mlab.points3d(Apts[0], Apts[1], Apts[2], color=Acolor, name="Anulls",
**kwargs)
Bpts = nulls['B'][1]
if Bcolor is not None and Bpts.shape[1]:
mlab.points3d(Bpts[0], Bpts[1], Bpts[2], color=Bcolor, name="Bnulls",
**kwargs)
def fancy_axes(figure=None, target=None, nb_labels=5, xl=None, xh=None,
tight=False, symmetric=False, padding=0.05, opacity=0.7,
face_color=None, line_width=2.0, grid_color=None,
labels=True, label_color=None, label_shadow=True,
consolidate_labels=True):
"""Make axes with 3 shaded walls and a grid similar to matplotlib
Args:
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
target (Mayavi Element): If either xl or xh are not given, then
get that limit from a bounding box around `target`
nb_labels (int, sequence): number of labels in all, or each
(x, y, z) directions
xl (float, sequence): lower corner of axes
xh (float, sequence): upper corner of axes
tight (bool): If False, then let xl and xh expand to make nicer
labels. This uses matplotlib to determine new extrema
symmetric (bool): If True, then xl + xh = 0
padding (float): add padding as a fraction of the total length
opacity (float): opacity of faces
face_color (sequence): color (r, g, b) of faces
line_width (float): Width of grid lines
grid_color (sequence): Color of grid lines
labels (bool): Whether or not to put axis labels on
label_color (sequence): color of axis labels
label_shadow (bool): Add shadows to all labels
consolidate_labels (bool): if all nb_labels are the same, then
only make one axis for the labels
Returns:
VTKDataSource: source to which 2 surfaces and 3 axes belong
"""
if figure is None:
figure = mlab.gcf()
# setup xl and xh
if xl is None or xh is None:
_outline = mlab.outline(target, figure=figure)
if xl is None:
xl = _outline.bounds[0::2]
if xh is None:
xh = _outline.bounds[1::2]
_outline.remove()
nb_labels = np.broadcast_to(nb_labels, (3,))
xl = np.array(np.broadcast_to(xl, (3,)))
xh = np.array(np.broadcast_to(xh, (3,)))
L = xh - xl
xl -= padding * L
xh += padding * L
# now adjust xl and xh to be prettier
if symmetric:
tight = False
if not tight:
from matplotlib.ticker import AutoLocator
for i in range(len(xl)): # pylint: disable=consider-using-enumerate
l = AutoLocator()
l.create_dummy_axis()
l.set_view_interval(xl[i], xh[i])
locs = l()
xl[i] = locs[0]
xh[i] = locs[-1]
dx = (xh - xl) / (nb_labels - 1)
grid = tvtk.ImageData(dimensions=nb_labels, origin=xl, spacing=dx)
src = VTKDataSource(data=grid)
src.name = "fancy_axes"
if face_color is None:
face_color = figure.scene.background
if grid_color is None:
grid_color = figure.scene.foreground
if label_color is None:
label_color = grid_color
face = mlab.pipeline.surface(src, figure=figure, opacity=opacity,
color=face_color)
face.actor.property.frontface_culling = True
if line_width:
grid = mlab.pipeline.surface(src, figure=figure, opacity=1.0,
color=grid_color, line_width=line_width,
representation='wireframe')
grid.actor.property.frontface_culling = True
if labels:
def _make_ax_for_labels(_i, all_axes=False):
if all_axes:
_ax = Axes(name='axes-labels')
else:
_ax = Axes(name='{0}-axis-labels'.format('xyz'[_i]))
# VTK bug... y_axis and z_axis are flipped... how is VTK still
# the de-facto 3d plotting library?
if _i == 0:
_ax.axes.x_axis_visibility = True
_ax.axes.y_axis_visibility = False
_ax.axes.z_axis_visibility = False
elif _i == 1:
_ax.axes.x_axis_visibility = False
_ax.axes.y_axis_visibility = False
_ax.axes.z_axis_visibility = True # VTK bug
elif _i == 2:
_ax.axes.x_axis_visibility = False
_ax.axes.y_axis_visibility = True # VTK bug
_ax.axes.z_axis_visibility = False
else:
raise ValueError()
_ax.property.opacity = 0.0
_ax.axes.number_of_labels = nb_labels[_i]
# import IPython; IPython.embed()
_ax.title_text_property.color = label_color
_ax.title_text_property.shadow = label_shadow
_ax.label_text_property.color = label_color
_ax.label_text_property.shadow = label_shadow
src.add_module(_ax)
if consolidate_labels and np.all(nb_labels[:] == nb_labels[0]):
_make_ax_for_labels(0, all_axes=True)
else:
_make_ax_for_labels(0, all_axes=False)
_make_ax_for_labels(1, all_axes=False)
_make_ax_for_labels(2, all_axes=False)
return src
axes = mlab.axes
xlabel = mlab.xlabel
ylabel = mlab.ylabel
zlabel = mlab.zlabel
title = mlab.title
outline = mlab.outline
orientation_axes = mlab.orientation_axes
view = mlab.view
def _extract_cmap_kwargs(kwargs):
cmap_kwargs = dict()
cmap_kwargs["cmap"] = kwargs.pop("cmap", None)
cmap_kwargs["alpha"] = kwargs.pop("alpha", None)
cmap_kwargs["clim"] = kwargs.pop("clim", None)
cmap_kwargs["symmetric"] = kwargs.pop("symmetric", False)
cmap_kwargs["logscale"] = kwargs.pop("logscale", False)
return kwargs, cmap_kwargs
def colorbar(*args, **kwargs):
"""Wraps mayavi.mlab.colorbar and adjusts cmap if you so choose"""
cmap = kwargs.pop("cmap", False)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
cmap_kwargs.pop("cmap")
ret = mlab.colorbar(*args, **kwargs)
apply_cmap(ret, cmap=cmap, **cmap_kwargs)
return ret
def scalarbar(*args, **kwargs):
"""Wraps mayavi.mlab.scalarbar and adjusts cmap if you so choose"""
cmap = kwargs.pop("cmap", False)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
cmap_kwargs.pop("cmap")
ret = mlab.scalarbar(*args, **kwargs)
apply_cmap(ret, cmap=cmap, **cmap_kwargs)
return ret
def vectorbar(*args, **kwargs):
"""Wraps mayavi.mlab.vectorbar and adjusts cmap if you so choose"""
cmap = kwargs.pop("cmap", False)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
cmap_kwargs.pop("cmap")
ret = mlab.vectorbar(*args, **kwargs)
apply_cmap(ret, cmap=cmap, **cmap_kwargs)
return ret
def get_cmap(cmap=None, lut=None, symmetric=False):
"""Get a Matplotlib colormap as an rgba ndarray
Args:
cmap (str): name of colormap, or an ndarray of rgb(a) colors
lut (int): number of entries desired in the lookup table
Returns:
ndarray: Nx4 array of N rgba colors
"""
    import matplotlib
    import matplotlib.cm
if symmetric and not cmap:
cmap = matplotlib.rcParams.get("viscid.symmetric_cmap", None)
try:
cm = matplotlib.cm.get_cmap(name=cmap, lut=lut)
rgba = (255 * np.asarray(cm(np.linspace(0, 1, cm.N)))).astype('i')
except TypeError:
rgba = np.asarray(cmap)
if np.all(rgba >= 0.0) and np.all(rgba <= 1.0):
rgba = (255 * rgba).astype('i')
else:
rgba = rgba.astype('i')
if np.any(rgba < 0) or np.any(rgba > 255):
raise ValueError("cmap ndarray must have color values between "
"0 and 255 or 0.0 and 1.0")
if rgba.shape[1] not in (3, 4) and rgba.shape[0] in (3, 4):
rgba = np.array(rgba.T)
if rgba.shape[1] == 3:
rgba = np.hstack([rgba, 255 * np.ones_like(rgba[:, :1])])
return rgba
def apply_cmap(target, cmap=None, lut=None, alpha=None, mode='scalar',
clim=None, symmetric=False, logscale=False):
"""Apply a Matplotlib colormap to a Mayavi object & adjust limits
Args:
target: Some Mayavi object on mode to apply the colormap
cmap (sequence, None, False): name of a Matplotlib colormap, or
a sequence of rgb(a) colors, or None to use the default,
or False to leave the colormap alone.
lut (int): number of entries desired in the lookup table
alpha (number, sequence): scalar or array that sets the alpha
(opacity) channel in the range [0..255]. This is expanded
to both ends of the colormap using linear interpolation,
i.e., [0, 255] will be a linear ramp from transparent to
opaque over the whole colormap.
mode (str): one of 'scalar', 'vector', or 'other'
clim (sequence): contains (vmin, vmax) for color scale
symmetric (bool): force the limits on the colorbar to be
symmetric around 0, and if no `cmap` is given, then also
use the default symmetric colormap
logscale (bool): Use a logarithmic color scale
Raises:
AttributeError: Description
ValueError: Description
"""
mode = mode.strip().lower()
# get the mayavi lut object
try:
if mode == "scalar":
mvi_lut = target.module_manager.scalar_lut_manager.lut
elif mode == "vector":
mvi_lut = target.module_manager.vector_lut_manager.lut
else:
if mode != "other":
raise ValueError("mode should be 'scalar', 'vector', or "
"'other'; not '{0}'".format(mode))
raise AttributeError()
except AttributeError:
mvi_lut = target.lut
# set the limits on the colorbar
if isinstance(clim, (list, tuple)):
mvi_lut.range = [clim[0], clim[1]]
elif clim == 0:
symmetric = True
elif clim:
symmetric = clim
if logscale and symmetric:
viscid.logger.warn("logscale and symmetric are mutually exclusive;"
"ignoring symmetric.")
if logscale:
mvi_lut.scale = 'log10'
elif symmetric:
# float(True) -> 1
val = float(symmetric) * np.max(np.abs(mvi_lut.range))
mvi_lut.range = [-val, val]
vmin, vmax = mvi_lut.range
is_symmetric = bool(np.isclose(vmax, -1 * vmin, atol=0))
# now set the colormap
changed = False
if cmap is False:
rgba = None if alpha is None else mvi_lut.table.to_array()
else:
rgba = get_cmap(cmap=cmap, lut=lut, symmetric=is_symmetric)
changed = True
if alpha is not None:
alpha = np.asarray(alpha).reshape(-1)
rgba[:, -1] = np.interp(np.linspace(0, 1, len(rgba)),
np.linspace(0, 1, len(alpha)), alpha)
changed = True
if changed:
mvi_lut.table = rgba
def insert_filter(filtr, module_manager):
"""Insert a filter above an existing module_manager
Args:
        filtr (Filter): the Mayavi filter to splice into the pipeline
        module_manager (ModuleManager): module manager to re-parent under
            ``filtr``
"""
filtr.parent.children.remove(module_manager)
filtr.children.append(module_manager)
def _apply_rotation(obj, from_system, rotate=None, crd_system='gse'):
if hasattr(rotate, "get_rotation_wxyz"):
rotate = rotate.get_rotation_wxyz(from_system, crd_system)
else:
cotr = viscid.as_cotr(rotate)
rotate = cotr.get_rotation_wxyz(from_system, crd_system)
if len(rotate) != 4:
raise ValueError("Rotate should be [angle, ux, uy, uz], got {0}"
"".format(rotate))
obj.actor.actor.rotate_wxyz(*rotate)
def plot_blue_marble(r=1.0, figure=None, nphi=128, ntheta=64, map_style=None,
lines=False, res=2, rotate=None, crd_system='gse'):
"""Plot Earth using the Natural Earth dataset maps
Args:
r (float): radius of earth
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
nphi (int): phi resolution of Earth's mesh
ntheta (int): theta resolution of Earth's mesh
map_style (str): Nothing for standard map, or 'faded'
lines (bool): Whether or not to show equator, tropics,
arctic circles, and a couple meridians.
res (int): Resolution in thousands of pixels longitude (must
be one of 1, 2, 4, 8)
rotate (None, sequence, str, datetime64): sequence of length 4
that contains (angle, ux, uy, uz) for the angle and axis of
a rotation, or a UT time as string or datetime64 to rotate
earth to a specific date/time, or a cotr object in
conjunction with crd_system
crd_system (str, other): Used if rotate is datetime-like. Can
be one of ('gse', 'mhd'), or anything that returns from
:py:func:`viscid.as_crd_system`.
Returns:
(VTKDataSource, mayavi.modules.surface.Surface)
"""
# make a plane, then deform it into a sphere
eps = 1e-4
ps = tvtk.PlaneSource(origin=(r, r * np.pi - eps, r * 0.0),
point1=(r, r * np.pi - eps, r * 2 * np.pi),
point2=(r, eps, 0.0),
x_resolution=nphi,
y_resolution=ntheta)
ps.update()
transform = tvtk.SphericalTransform()
tpoly = tvtk.TransformPolyDataFilter(transform=transform,
input_connection=ps.output_port)
tpoly.update()
src = VTKDataSource(data=tpoly.output, name="blue_marble")
surf = mlab.pipeline.surface(src)
# now load a jpg, and use it to texture the sphere
linestr = '_lines' if lines else ''
assert map_style in (None, '', 'faded')
assert res in (1, 2, 4, 8)
map_style = '_{0}'.format(map_style) if map_style else ''
img_name = "images/earth{0}{1}_{2}k.jpg".format(map_style, linestr, res)
fname = os.path.realpath(os.path.dirname(__file__) + '/' + img_name)
img = tvtk.JPEGReader(file_name=fname)
texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)
surf.actor.enable_texture = True
surf.actor.texture = texture
surf.actor.property.color = (1.0, 1.0, 1.0)
# rotate 180deg b/c i can't rotate the texture to make the prime meridian
surf.actor.actor.rotate_z(180)
_apply_rotation(surf, 'geo', rotate, crd_system=crd_system)
add_source(src, figure=figure)
return src, surf
plot_natural_earth = plot_blue_marble
def plot_earth_3d(figure=None, daycol=(1, 1, 1), nightcol=(0, 0, 0),
radius=1.0, res=24, crd_system="gse", night_only=False,
**kwargs):
"""Plot a black and white sphere (Earth) showing sunward direction
Parameters:
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
daycol (tuple, optional): color of dayside (RGB)
nightcol (tuple, optional): color of nightside (RGB)
        res (optional): resolution of the sphere
crd_system (str, other): One of ('mhd', 'gse'), or anything
that returns from :py:func:`viscid.as_crd_system`.
Returns:
Tuple (day, night) as vtk sources
"""
if figure is None:
figure = mlab.gcf()
crd_system = viscid.as_crd_system(crd_system)
if crd_system == "mhd":
theta_dusk, theta_dawn = 270, 90
elif crd_system == "gse":
theta_dusk, theta_dawn = 90, 270
else:
# use GSE convention?
theta_dusk, theta_dawn = 90, 270
night = BuiltinSurface(source='sphere', name='night')
night.data_source.set(center=(0, 0, 0), radius=radius,
start_theta=theta_dusk, end_theta=theta_dawn,
theta_resolution=res, phi_resolution=res)
mod = mlab.pipeline.surface(night, color=nightcol, figure=figure, **kwargs)
mod.actor.property.backface_culling = True
if not night_only:
day = BuiltinSurface(source='sphere', name='day')
day.data_source.set(center=(0, 0, 0), radius=radius,
start_theta=theta_dawn, end_theta=theta_dusk,
theta_resolution=res, phi_resolution=res)
mod = mlab.pipeline.surface(day, color=daycol, figure=figure, **kwargs)
mod.actor.property.backface_culling = True
else:
day = None
return day, night
def to_mpl(figure=None, ax=None, size=None, antialiased=True, hide=True,
fit=None, **kwargs):
"""Display a mayavi figure inline in an Jupyter Notebook.
This function takes a screenshot of a figure and blits it to a matplotlib
figure using matplotlib.pyplot.imshow()
Args:
figure: A mayavi figure, if not specified, uses mlab.gcf()
ax: Matplotlib axis of the destination (plt.gca() if None)
size (None, tuple): if given, resize the scene in pixels (x, y)
antialiased (bool): Antialias mayavi plot
hide (bool): if True, try to hide the render window
fit (None, bool): Resize mpl window to fit image exactly. If
None, then fit if figure does not currently exist.
**kwargs: passed to mayavi.mlab.screenshot()
"""
if figure is None:
figure = mlab.gcf()
if size is not None:
resize(size, figure=figure)
pixmap = mlab.screenshot(figure, antialiased=antialiased, **kwargs)
# try to hide the window... Qt backend only
if hide:
hide_window(figure)
if ax is None:
from matplotlib import pyplot as plt
# if there are no figures, and fit is None, then fit
if fit is None:
            fit = not bool(plt.get_fignums())
ax = plt.gca()
if fit:
pltfig = ax.figure
dpi = pltfig.get_dpi()
pltfig.set_size_inches([s / dpi for s in figure.scene.get_size()],
forward=True)
pltfig.subplots_adjust(top=1, bottom=0, left=0, right=1,
hspace=0, wspace=0)
ax.imshow(pixmap)
ax.axis('off')
def figure(*args, **kwargs):
offscreen = kwargs.pop('offscreen', False)
hide = kwargs.pop('hide', None)
fig = mlab.figure(*args, **kwargs)
# if size was set, run resize to account for the height of window
# decorations
if 'size' in kwargs:
resize(kwargs['size'], figure=fig)
# hide window by default?
if hide or (hide is None and offscreen):
hide_window(fig)
# send it offscreen?
if offscreen:
make_fig_offscreen(fig, hide=False)
return fig
def make_fig_offscreen(figure, hide=True):
if hide:
hide_window(figure)
figure.scene.off_screen_rendering = True
return figure
def show(stop=False):
"""Calls :meth:`mayavi.mlab.show(stop=stop)`"""
mlab.show(stop=stop)
def clf(figure=None):
"""Clear source data, then clear figure
Args:
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
"""
if not figure:
figure = mlab.gcf()
clear_data(figure)
mlab.clf(figure)
def remove_source(src):
"""Safely remove a specific vtk source
Args:
src (vtk_data_source): vtk data source to remove
"""
src.stop()
try:
try:
src.data.release_data()
except TraitError:
src.data.release_data_flag = 1
src.cell_scalars_name = ''
src.cell_tensors_name = ''
src.cell_vectors_name = ''
src.point_scalars_name = ''
src.point_tensors_name = ''
src.point_vectors_name = ''
except AttributeError:
pass
src.start()
src.stop()
src.remove()
def clear_data(figures=None):
"""Workaround for Mayavi / VTK memory leak
This is needed when Mayavi/VTK keeps a reference to source data
when you would expect it to be freed like on a call to `mlab.clf()`
or when removing sources from the pipeline.
Note:
This must be called when the pipeline still has the source, so
before a call to `mlab.clf()`, etc.
1. Set release_data_flag on all sources' data
2. Remove reference to the data
3. Remove the data source
Args:
figures (None, mayavi.core.scene.Scene, or 'all'): if None,
gets current scene; if Scene object, just that one; if
'all', act on all scenes in the current engine. Can also be
a list of Scene objects
"""
if figures is None:
figures = [mlab.gcf()]
elif figures == "all":
figures = mlab.get_engine().scenes
if not isinstance(figures, (list, tuple)):
figures = [figures]
if all(fig is None for fig in figures):
return
for fig in figures:
# # fig stop / start kills mayavi now, not sure why
# fig.stop()
for child in list(fig.children):
remove_source(child)
# fig.start()
return
def resize(size, figure=None):
"""Summary
Args:
size (tuple): width, height in pixels
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
Returns:
None
"""
if figure is None:
figure = mlab.gcf()
try:
# scene.set_size doesn't seem to work when rendering on screen, so
# go into the backend and do it by hand
if mlab.options.offscreen:
figure.scene.set_size(size)
elif figure.scene.off_screen_rendering:
viscid.logger.warn("viscid.plot.vlab.resize doesn't work for "
"figures that are off-screened this way. Try "
"creating the figure with viscid.plot.vlab."
"figure(size=(w, h), offscreen=True)")
else:
toolkit = mayavi.ETSConfig.toolkit
if toolkit == 'qt4':
sc = figure.scene
window_height = sc.control.parent().size().height()
render_height = sc.render_window.size[1]
h = window_height - render_height
sc.control.parent().resize(size[0], size[1] + h)
elif toolkit == 'wx':
w, h = size[0], size[1]
figure.scene.control.Parent.Parent.SetClientSizeWH(w, h)
else:
viscid.logger.warn("Unknown mayavi backend {0} (not qt4 or "
"wx); not resizing.".format(toolkit))
except Exception as e: # pylint: disable=broad-except
viscid.logger.warn("Resize didn't work:: {0}".format(repr(e)))
def hide_window(figure, debug=False):
"""Try to hide the window; only does something on Qt backend"""
try:
# fig.scene.control.parent().hide()
figure.scene.control.parent().showMinimized()
except Exception as e: # pylint: disable=broad-except,unused-variable
if debug:
print("Window hide didn't work::", repr(e))
def savefig(*args, **kwargs):
"""Wrap mayavi.mlab.savefig with offscreen hack"""
fig = mlab.gcf()
prev_offscreen_state = fig.scene.off_screen_rendering
if sys.platform != "darwin":
fig.scene.off_screen_rendering = True
mlab.savefig(*args, **kwargs)
if fig.scene.off_screen_rendering != prev_offscreen_state:
fig.scene.off_screen_rendering = prev_offscreen_state
def interact(stack_depth=0, **kwargs):
viscid.vutil.interact(stack_depth=stack_depth + 1, mvi_ns=True, **kwargs)
plot3d_lines = plot_lines
plot_lines3d = plot_lines
##
## EOF
##
```
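The module above is a thin convenience layer over `mayavi.mlab`. As a minimal, hedged sketch of how it might be driven end to end (assuming Mayavi is installed with a working or offscreen rendering backend; `"earth.png"` is an arbitrary output name):
```python
# Minimal sketch, not part of the module above; assumes Mayavi can render
# offscreen on this machine and that "earth.png" is a writable path.
from viscid.plot import vlab

fig = vlab.figure(size=(800, 600), offscreen=True)      # hidden, offscreen scene
day, night = vlab.plot_earth_3d(crd_system='gse')        # day/night hemispheres
src, surf = vlab.plot_blue_marble(r=1.001, lines=True)   # textured Earth overlay
vlab.view(azimuth=45, elevation=70, distance=6.0)
vlab.savefig("earth.png")    # wrapper that toggles off_screen_rendering as needed
vlab.clf(fig)                # clear_data() + mlab.clf() to dodge the VTK leak
```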
#### File: Viscid/viscid/_rc.py
```python
from __future__ import print_function
import os
import traceback
import viscid
from viscid import vjson
from viscid.compat.vimportlib import import_module
class RCPathError(Exception):
message = ""
def __init__(self, message=""):
self.message = message
super(RCPathError, self).__init__(message)
def _get_obj(rt, path):
if len(path) == 0:
return rt
try:
if not hasattr(rt, path[0]):
try:
# if path[0] is not an attribute of rt, then try
# importing it
module_name = "{0}.{1}".format(rt.__name__, path[0])
import_module(module_name)
except ImportError:
# nope, the attribute really doesn't exist
raise AttributeError("root {0} has no attribute "
"{1}".format(rt, path[0]))
return _get_obj(getattr(rt, path[0]), path[1:])
except AttributeError:
# can't re-raise AttributeError since this is recursive
# and we're catching AttributeError, so the info about
# what part of the path DNE would be lost
raise RCPathError("'{0}' has no attribute '{1}'"
"".format(rt.__name__, path[0]))
def set_attribute(path, value):
# try:
# value = _parse_rc_value(value)
# except RCValueError as e:
# viscid.logger.warn("Skipping bad ~/.viscidrc value:: {0}\n".format(value)
# " {0}".format(str(e)))
# return None
p = path.split('.')
obj = _get_obj(viscid, p[:-1])
if not hasattr(obj, p[-1]):
viscid.logger.warn("from rc file; '{0}' has no attribute '{1}'.\n"
"If this isn't a typeo then the functionality may "
"have moved.".format(".".join(p[:-1]), p[-1]))
setattr(obj, p[-1], value)
return obj
def load_rc_file(fname):
touched_objects = []
try:
with open(os.path.expanduser(os.path.expandvars(fname)), 'r') as f:
try:
import yaml
rc_obj = yaml.load(f)
except ImportError:
try:
rc_obj = vjson.load(f)
except ValueError as e:
tb = traceback.format_exc()
tb = '\n'.join(' ' * 4 + line_ for line_ in tb.split('\n'))
m = ("{0}\n{1}\n"
"JSON parsing of {2} failed. If the file is using "
"Yaml syntax, please install PyYaml."
"".format(tb, str(e), f.name))
raise ValueError(m)
for path, val in rc_obj.items():
try:
touched_objects.append(set_attribute(path, val))
except RCPathError as e:
viscid.logger.warn("from rc file; {0}\n"
"If this isn't a typeo then the "
"functionality may have moved."
"".format(str(e)))
except IOError:
pass
for obj in set(touched_objects):
try:
obj.post_rc_actions()
except AttributeError:
pass
##
## EOF
##
```
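To make the rc mechanics concrete, here is a sketch that writes a throwaway JSON rc file and loads it; the dotted key `logger.propagate` is purely illustrative of the `"<attribute.path>": value` convention, not a recommended setting.
```python
# Illustrative sketch only; the dotted key below is an example path.
import json
import os
import tempfile

import viscid
from viscid import _rc

rc = {"logger.propagate": False}      # "<dotted.path>": value, resolved against viscid
fname = os.path.join(tempfile.mkdtemp(), "viscidrc")
with open(fname, "w") as fobj:
    json.dump(rc, fobj)               # JSON is also valid YAML, so either parser accepts it

_rc.load_rc_file(fname)               # walks the dict and calls set_attribute()
print(viscid.logger.propagate)        # -> False
```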
#### File: viscid/readers/athena_hst.py
```python
from __future__ import print_function
import re
import numpy as np
from viscid.readers import vfile
from viscid import field
from viscid import coordinate
class AthenaHstFile(vfile.VFile): # pylint: disable=abstract-method
"""An Athena time history file"""
_detector = r"^\s*(.*)\.(hst)\s*$"
def __init__(self, fname, **kwargs):
"""
Keyword Arguments:
float_type_name (str): should be 'f4' or 'f8' if you know
the data type of the file's data.
"""
super(AthenaHstFile, self).__init__(fname, **kwargs)
def _parse(self):
# get field names from header
with open(self.fname, 'r') as f:
line = f.readline()
line = f.readline().lstrip("#").strip()
fld_names = re.split(r"\[[0-9]+\]=", line)[1:]
fld_names = [fn.strip() for fn in fld_names]
# crds here are really times
dat = np.loadtxt(self.fname, unpack=True)
t = dat[0]
crds = coordinate.wrap_crds("nonuniform_cartesian", [('t', t)])
g = self._make_grid(self, name="AthenaHstGrid")
g.set_crds(crds)
g.time = 0
for i in range(1, len(fld_names)):
fld = self._make_field(g, "Scalar", fld_names[i], crds, dat[i],
center="Node")
g.add_field(fld)
self.add(g)
self.activate(0)
##
## EOF
##
```
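Reading a history file then just lists whatever columns the header declared. In the sketch below, `"Blast.hst"` is a hypothetical file name and `viscid.load_file` is assumed to be the package-level alias for `viscid.readers.load_file`.
```python
# Sketch; "Blast.hst" stands in for a real Athena history file.
import viscid

hst = viscid.load_file("Blast.hst")   # matched by the r"^\s*(.*)\.(hst)\s*$" detector
for fld in hst.iter_fields():         # one node-centered field per history column
    print(fld.name)
```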
#### File: viscid/readers/athena_xdmf.py
```python
import os
import re
from viscid.readers import xdmf
from viscid.readers import athena
class AthenaFileXDMF(athena.AthenaFile, xdmf.FileXDMF): # pylint: disable=abstract-method
"""File type for Athena style convenience stuff
Makes AthenaGrid the default grid and handles grouping multiple
files.
"""
_detector = r"^\s*(.*)\.([0-9]+)\.(ath.xdmf)\s*$"
_collection = None
@classmethod
def group_fnames(cls, fnames):
return athena.group_athena_files_common(cls._detector, fnames)
@classmethod
def collective_name_from_group(cls, fnames):
return athena.athena_collective_name_from_group(cls._detector,
fnames)
def load(self, fname):
if not isinstance(fname, list):
fname = [fname]
if len(fname) > 1:
self._collection = fname
else:
self._collection = None
super(AthenaFileXDMF, self).load(fname[0])
basename = os.path.basename(self.fname)
self.set_info('run', re.match(self._detector, basename).group(1))
def _parse(self):
if self._collection is not None:
# assume we have a collection of temporal files, because why not
data_temporal = self._make_dataset(self, dset_type="temporal",
name="AthenaXDMFTemporalCollection")
for fname in self._collection:
grids = self._parse_file(fname, data_temporal)
for _grid in grids:
data_temporal.add(_grid)
data_temporal.activate(0)
self.add(data_temporal)
self.activate(0)
else:
super(AthenaFileXDMF, self)._parse()
##
## EOF
##
```
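Since `group_fnames` folds per-step files into a single temporal collection, a glob of frame files can be handed straight to the loader; the run name below is hypothetical.
```python
# Sketch; "Blast.*.ath.xdmf" stands in for a set of per-step Athena XDMF files.
import viscid

for f in viscid.load_files("Blast.*.ath.xdmf"):   # grouped into one temporal dataset
    print(f.fname)
```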
#### File: viscid/readers/ggcm_jrrle.py
```python
from __future__ import print_function
import os
import re
from datetime import datetime, timedelta
import numpy as np
from viscid import grid
from viscid.readers import vfile
from viscid.readers import openggcm
from viscid.readers._fortfile_wrapper import FortranFile
from viscid.compat import OrderedDict
try:
from viscid.readers import _jrrle
except ImportError as e:
from viscid.verror import UnimportedModule
msg = "Fortran readers not available since they were not built correctly"
_jrrle = UnimportedModule(e, msg=msg)
read_ascii = False
class JrrleFileWrapper(FortranFile):
"""Interface for actually opening / reading a jrrle file"""
fields_seen = None
seen_all_fields = None
def __init__(self, filename):
self._read_func = [_jrrle.read_jrrle1d, _jrrle.read_jrrle2d,
_jrrle.read_jrrle3d]
self.fields_seen = OrderedDict()
self.seen_all_fields = False
super(JrrleFileWrapper, self).__init__(filename)
def read_field(self, fld_name, ndim):
"""Read a field given a seekable location
Parameters:
loc(int): position in file we can seek to
ndim(int): dimensionality of field
Returns:
tuple (field name, dict of meta data, array)
"""
meta = self.inquire(fld_name)
arr = np.empty(meta['dims'], dtype='float32', order='F')
self._read_func[ndim - 1](self.unit, arr, fld_name, read_ascii)
return meta, arr
def inquire_all_fields(self, reinquire=False):
if reinquire:
self.seen_all_fields = False
self.fields_seen = OrderedDict()
if self.seen_all_fields:
return
self.rewind()
while not self.seen_all_fields:
self.inquire_next()
# last_seen, meta = self.inquire_next()
# if meta is not None:
# print(last_seen, "lives at", meta["file_position"])
self.advance_one_line()
def inquire(self, fld_name):
try:
meta = self.fields_seen[fld_name]
self.seek(meta['file_position'])
return meta
except KeyError:
try:
last_added = next(reversed(self.fields_seen))
self.seek(self.fields_seen[last_added]['file_position'])
self.advance_one_line()
except StopIteration:
pass # we haven't read any fields yet, that's ok
while not self.seen_all_fields:
found_fld_name, meta = self.inquire_next()
if found_fld_name == fld_name:
return meta
self.advance_one_line()
raise KeyError("file '{0}' has no field '{1}'"
"".format(self.filename, fld_name))
def inquire_next(self):
"""Collect the meta-data from the next field in the file
Returns:
tuple (field name, dict of meta data) both
of which will be None if there are no more Fields
Note:
After this operation is done, the file-pointer will be reset
to the position it was before the inquiry.
"""
if not self.isopen:
raise RuntimeError("file is not open")
varname = np.array(" "*80, dtype="S80")
tstring = np.array(" "*80, dtype="S80")
found_field, ndim, nx, ny, nz, it = _jrrle.inquire_next(self._unit,
varname,
tstring)
varname = str(np.char.decode(varname)).strip()
tstring = str(np.char.decode(tstring)).strip()
if not found_field:
self.seen_all_fields = True
return None, None
if varname in self.fields_seen:
meta = self.fields_seen[varname]
else:
dims = tuple(x for x in (nx, ny, nz) if x > 0)
meta = dict(timestr=tstring,
inttime=it,
ndim=ndim,
dims=dims,
file_position=self.tell())
self.fields_seen[varname] = meta
return varname, meta
class JrrleDataWrapper(vfile.DataWrapper):
"""Interface for lazily pointing to a jrrle field"""
file_wrapper = None
filename = None
fld_name = None
expected_shape = None
def __init__(self, file_wrapper, fld_name, expected_shape):
"""Lazy wrapper for a field in a jrrle file
Parameters:
expected_shape (tuple): shape of data in the file (xyz)
"""
super(JrrleDataWrapper, self).__init__()
self.file_wrapper = file_wrapper
self.filename = file_wrapper.filename
self.fld_name = fld_name
# translate to zyx
self.expected_shape = expected_shape
@property
def shape(self):
"""
Returns:
zyx shape since that's the shape __array__ returns
"""
return self.expected_shape[::-1]
@property
def dtype(self):
return np.dtype("float32")
def __array__(self, *args, **kwargs):
with self.file_wrapper as f:
ndim = len(self.expected_shape)
# fld_name, meta, arr = f.read_field_at(self.loc, ndim)
meta, arr = f.read_field(self.fld_name, ndim)
arr = np.array(arr.flatten(order='F').reshape(meta['dims'][::-1]),
order='C')
            # meta's dims are xyz (from file), as is expected_shape
if meta['dims'] != self.expected_shape:
raise RuntimeError("Field '{0}' from file '{1}' has shape {2} "
"instead of {3}".format(self.fld_name,
self.filename,
meta['dims'],
self.expected_shape))
return arr.astype(self.dtype)
def read_direct(self, *args, **kwargs):
return self.__array__()
def len(self):
return self.shape[0]
def __getitem__(self, item):
return self.__array__().__getitem__(item)
class GGCMFileJrrleMHD(openggcm.GGCMFileFortran): # pylint: disable=abstract-method
"""Jimmy's run length encoding files"""
_detector = r"^\s*(.*)\.(p[xyz]_[0-9]+|3df)" \
r"\.([0-9]{6})\s*$"
_fwrapper_type = JrrleFileWrapper
_data_item_templates = None
_def_fld_center = "Cell"
def __init__(self, filename, **kwargs):
super(GGCMFileJrrleMHD, self).__init__(filename, **kwargs)
def _shape_discovery_hack(self, filename):
with self.get_file_wrapper(filename) as f:
_, meta = f.inquire_next()
return meta['dims']
def _parse_file(self, filename, parent_node):
# we do minimal file parsing here for performance. we just
# make data wrappers from the templates we got from the first
# file in the group, and package them up into grids
# find the time from the first field's meta data
int_time = int(re.match(self._detector, filename).group(3))
time = float(int_time)
_grid = self._make_grid(parent_node, name="<JrrleGrid>",
**self._grid_opts)
self.time = time
_grid.time = time
_grid.set_crds(self._crds)
templates = self._fld_templates
if templates is None:
templates = self._make_template(filename)
# make a DataWrapper and a Field for each template that we
# have from the first file that we parsed, then add it to
# the _grid
if self._iono:
data_wrapper = JrrleDataWrapper
else:
data_wrapper = JrrleDataWrapper
for item in templates:
data = data_wrapper(self.get_file_wrapper(filename),
item['fld_name'], item['shape'])
fld = self._make_field(_grid, "Scalar", item['fld_name'],
self._crds, data,
center=self._def_fld_center, time=time,
zyx_native=True)
_grid.add_field(fld)
return _grid
def _make_template(self, filename):
"""read meta data for all fields in a file to get
a list of field names and shapes, all the required info
to make a JrrleDataWrapper
"""
with self.get_file_wrapper(filename) as f:
f.inquire_all_fields()
template = []
meta = None
for fld_name, meta in f.fields_seen.items():
d = dict(fld_name=fld_name,
shape=meta['dims'])
template.append(d)
if meta is not None:
if self.find_info('basetime', default=None) is None:
basetime, _ = self.parse_timestring(meta['timestr'])
if self.parents:
self.parents[0].set_info("basetime", basetime)
else:
self.set_info("basetime", basetime)
return template
@classmethod
def collective_name_from_group(cls, fnames):
fname0 = fnames[0]
basename = os.path.basename(fname0)
run = re.match(cls._detector, basename).group(1)
fldtype = re.match(cls._detector, basename).group(2)
new_basename = "{0}.{1}".format(run, fldtype)
return os.path.join(os.path.dirname(fname0), new_basename)
class GGCMFileJrrleIono(GGCMFileJrrleMHD): # pylint: disable=abstract-method
"""Jimmy's run length encoding files"""
_detector = r"^\s*(.*)\.(iof)\.([0-9]{6})\s*$"
_iono = True
_grid_type = grid.Grid
_def_fld_center = "Node"
# class JrrleIonoDataWrapper(JrrleDataWrapper):
# @property
# def shape(self):
# ret = tuple([n - 1 for n in reversed(self.expected_shape)])
# return ret
# def __array__(self, *args, **kwargs):
# arr = super(JrrleIonoDataWrapper, self).__array__(*args, **kwargs)
# ndim = len(self.expected_shape)
# return arr[[slice(None, -1)]*ndim]
##
## EOF
##
```
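For a quick look at what a jrrle file contains, the wrapper can also be used directly. This sketch assumes the compiled `_jrrle` Fortran extension built correctly and that `"target.3df.000100"` is a hypothetical OpenGGCM output file.
```python
# Sketch; requires the compiled _jrrle reader and a real jrrle file on disk.
from viscid.readers.ggcm_jrrle import JrrleFileWrapper

with JrrleFileWrapper("target.3df.000100") as f:   # hypothetical file name
    f.inquire_all_fields()                          # one pass to index every record
    for name, meta in f.fields_seen.items():
        print(name, meta['dims'], meta['timestr'])
```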
#### File: viscid/readers/gkeyll.py
```python
from __future__ import print_function, division
import os
import re
from operator import itemgetter
import numpy as np
try:
import h5py
HAS_H5PY = True
except ImportError:
HAS_H5PY = False
from viscid.readers.vfile_bucket import ContainerFile
from viscid.readers.hdf5 import FileHDF5, H5pyDataWrapper
from viscid import grid
from viscid import field
from viscid.coordinate import wrap_crds
base_hydro_names = ['rho', 'rhoux', 'rhouy', 'rhouz', 'e']
base_hydro_pretty_names = [r'$\rho$', r'$\rho u_x$', r'$\rho u_y$', r'$\rho u_z$',
                           r'$e$']
base_5m_names = ['rho_e', 'rhoux_e', 'rhouy_e', 'rhouz_e', 'e_e',
'rho_i', 'rhoux_i', 'rhouy_i', 'rhouz_i', 'e_i',
'Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz']
base_5m_pretty_names = [
r'$\rho_e$', r'$\rho u_{x,e}$', r'$\rho u_{y,e}$', r'$\rho u_{z,e}$', r'$e_e$',
r'$\rho_i$', r'$\rho u_{x,i}$', r'$\rho u_{y,i}$', r'$\rho u_{z,i}$', r'$e_i$',
r'$E_x$', r'$E_y$', r'$E_z$', r'$B_x$', r'$B_y$', r'$B_z$']
base_10m_names = ['rho_e', 'rhoux_e', 'rhouy_e', 'rhouz_e',
'pxx_e', 'pxy_e', 'pxz_e', 'pyy_e', 'pyz_e', 'pzz_e',
'rho_i', 'rhoux_i', 'rhouy_i', 'rhouz_i',
'pxx_i', 'pxy_i', 'pxz_i', 'pyy_i', 'pyz_i', 'pzz_i',
'Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz']
base_10m_pretty_names = [
r'$\rho_e$', r'$\rho u_{x,e}$', r'$\rho u_{y,e}$', r'$\rho u_{z,e}$',
r'$\mathcal{P}_{xx,e}$', r'$\mathcal{P}_{xy,e}$', r'$\mathcal{P}_{xz,e}$',
r'$\mathcal{P}_{yy,e}$', r'$\mathcal{P}_{yz,e}$', r'$\mathcal{P}_{zz,e}$',
r'$\rho_i$', r'$\rho u_{x,i}$', r'$\rho u_{y,i}$', r'$\rho u_{z,i}$',
r'$\mathcal{P}_{xx,i}$', r'$\mathcal{P}_{xy,i}$', r'$\mathcal{P}_{xz,i}$',
r'$\mathcal{P}_{yy,i}$', r'$\mathcal{P}_{yz,i}$', r'$\mathcal{P}_{zz,i}$',
r'$E_x$', r'$E_y$', r'$E_z$', r'$B_x$', r'$B_y$', r'$B_z$']
_type_info = {len(base_hydro_names): {'field_type': 'hydro',
'names': base_hydro_names,
'pretty_names': base_hydro_pretty_names},
len(base_5m_names): {'field_type': 'five-moment',
'names': base_5m_names,
'pretty_names': base_5m_pretty_names},
len(base_10m_names): {'field_type': 'ten-moment',
'names': base_10m_names,
'pretty_names': base_10m_pretty_names}}
class GkeyllGrid(grid.Grid):
""""""
def _assemble_vector(self, base_name, comp_names="xyz", suffix="",
forget_source=False, **kwargs):
opts = dict(forget_source=forget_source, **kwargs)
# caching behavior depends on self.longterm_field_caches
comps = [self[base_name + c + suffix] for c in comp_names]
return field.scalar_fields_to_vector(comps, name=base_name, **opts)
def _get_ux_e(self):
ux_e = self['rhoux_e'] / self['rho_e']
ux_e.name = 'ux_e'
ux_e.pretty_name = r'$u_{x,e}$'
return ux_e
def _get_uy_e(self):
uy_e = self['rhouy_e'] / self['rho_e']
uy_e.name = 'uy_e'
uy_e.pretty_name = r'$u_{y,e}$'
return uy_e
def _get_uz_e(self):
uz_e = self['rhouz_e'] / self['rho_e']
uz_e.name = 'uz_e'
uz_e.pretty_name = r'$u_{z,e}$'
return uz_e
def _get_Pxx_e(self):
Pxx_e = self['pxx_e'] - self['rhoux_e']*self['rhoux_e'] / self['rho_e']
Pxx_e.name = 'Pxx_e'
Pxx_e.pretty_name = r'$P_{xx,e}$'
return Pxx_e
def _get_Pyy_e(self):
Pyy_e = self['pyy_e'] - self['rhoux_e']*self['rhoux_e'] / self['rho_e']
Pyy_e.name = 'Pyy_e'
Pyy_e.pretty_name = r'$P_{yy,e}$'
return Pyy_e
def _get_Pzz_e(self):
Pzz_e = self['pzz_e'] - self['rhoux_e']*self['rhoux_e'] / self['rho_e']
Pzz_e.name = 'Pzz_e'
Pzz_e.pretty_name = r'$P_{zz,e}$'
return Pzz_e
def _get_Pxy_e(self):
Pxy_e = self['pxy_e'] - self['rhoux_e']*self['rhouy_e'] / self['rho_e']
Pxy_e.name = 'Pxy_e'
Pxy_e.pretty_name = r'$P_{xy,e}$'
return Pxy_e
def _get_Pxz_e(self):
Pxz_e = self['pxz_e'] - self['rhoux_e']*self['rhouz_e'] / self['rho_e']
Pxz_e.name = 'Pxz_e'
Pxz_e.pretty_name = r'$P_{xz,e}$'
return Pxz_e
def _get_Pyz_e(self):
Pyz_e = self['pyz_e'] - self['rhouy_e']*self['rhouz_e'] / self['rho_e']
Pyz_e.name = 'Pyz_e'
Pyz_e.pretty_name = r'$P_{yz,e}$'
return Pyz_e
def _get_ux_i(self):
ux_i = self['rhoux_i'] / self['rho_i']
ux_i.name = 'ux_i'
ux_i.pretty_name = r'$u_{x,i}$'
return ux_i
def _get_uy_i(self):
uy_i = self['rhouy_i'] / self['rho_i']
uy_i.name = 'uy_i'
uy_i.pretty_name = r'$u_{y,i}$'
return uy_i
def _get_uz_i(self):
uz_i = self['rhouz_i'] / self['rho_i']
uz_i.name = 'uz_i'
uz_i.pretty_name = r'$u_{z,i}$'
return uz_i
def _get_Pxx_i(self):
Pxx_i = self['pxx_i'] - self['rhoux_i']*self['rhoux_i'] / self['rho_i']
Pxx_i.name = 'Pxx_i'
Pxx_i.pretty_name = r'$P_{xx,i}$'
return Pxx_i
def _get_Pyy_i(self):
Pyy_i = self['pyy_i'] - self['rhoux_i']*self['rhoux_i'] / self['rho_i']
Pyy_i.name = 'Pyy_i'
Pyy_i.pretty_name = r'$P_{yy,i}$'
return Pyy_i
def _get_Pzz_i(self):
Pzz_i = self['pzz_i'] - self['rhoux_i']*self['rhoux_i'] / self['rho_i']
Pzz_i.name = 'Pzz_i'
Pzz_i.pretty_name = r'$P_{zz,i}$'
return Pzz_i
def _get_Pxy_i(self):
Pxy_i = self['pxy_i'] - self['rhoux_i']*self['rhouy_i'] / self['rho_i']
Pxy_i.name = 'Pxy_i'
Pxy_i.pretty_name = r'$P_{xy,i}$'
return Pxy_i
def _get_Pxz_i(self):
Pxz_i = self['pxz_i'] - self['rhoux_i']*self['rhouz_i'] / self['rho_i']
Pxz_i.name = 'Pxz_i'
Pxz_i.pretty_name = r'$P_{xz,i}$'
return Pxz_i
def _get_Pyz_i(self):
Pyz_i = self['pyz_i'] - self['rhouy_i']*self['rhouz_i'] / self['rho_i']
Pyz_i.name = 'Pyz_i'
Pyz_i.pretty_name = r'$P_{yz,i}$'
return Pyz_i
def _get_u_e(self):
# get from [ux_e, uy_e, uz_e]
return self._assemble_vector("u", suffix='_e',
_force_layout=self.force_vector_layout,
pretty_name='u_e')
def _get_u_i(self):
# get from [ux_i, uy_i, uz_i]
return self._assemble_vector("u", suffix='_i',
_force_layout=self.force_vector_layout,
pretty_name='u_i')
def _get_b(self):
# get from [Bx, By, Bz]
return self._assemble_vector("B", _force_layout=self.force_vector_layout,
pretty_name="b")
class GkeyllFile(FileHDF5, ContainerFile): # pylint: disable=abstract-method
""""""
_detector = r"^\s*(.*)_(q)_([0-9]+).(h5)\s*$"
_grid_type = GkeyllGrid
SAVE_ONLY = False
_fwrapper = None
_crds = None
_fld_templates = None
def __init__(self, fname, crds=None, fld_templates=None, **kwargs):
assert HAS_H5PY
self._crds = crds
self._fld_templates = fld_templates
super(GkeyllFile, self).__init__(fname, **kwargs)
@classmethod
def group_fnames(cls, fnames):
"""Group File names
The default implementation just returns fnames, but some file
types might do something fancy here
Parameters:
fnames (list): names that can be logically grouped, as in
a bunch of file names that are different time steps
of a given run
Returns:
A list of things that can be given to the constructor of
this class
"""
infolst = []
for name in fnames:
m = re.match(cls._detector, name)
grps = m.groups()
d = dict(runname=grps[0], ftype=grps[1],
fname=m.string)
try:
d["frame"] = int(grps[2])
except (TypeError, ValueError):
# grps[2] is none for "RUN.3d.xdmf" files
d["frame"] = -1
infolst.append(d)
# careful with sorting, only consecutive files will be grouped
infolst.sort(key=itemgetter("frame"))
infolst.sort(key=itemgetter("ftype"))
infolst.sort(key=itemgetter("runname"))
info_groups = []
info_group = [infolst[0]]
for info in infolst[1:]:
last = info_group[-1]
if info['runname'] == last['runname'] and \
info['ftype'] == last['ftype']:
info_group.append(info)
else:
info_groups.append(info_group)
info_group = [info]
info_groups.append(info_group)
# turn info_groups into groups of just file names
groups = []
for info_group in info_groups:
groups.append([info['fname'] for info in info_group])
return groups
@classmethod
def collective_name_from_group(cls, fnames):
fname0 = fnames[0]
basename = os.path.basename(fname0)
run = re.match(cls._detector, basename).group(1)
fldtype = re.match(cls._detector, basename).group(2)
new_basename = "{0}.{1}.STAR.h5".format(run, fldtype)
return os.path.join(os.path.dirname(fname0), new_basename)
def get_file_wrapper(self, filename):
if self._fwrapper is None:
# self._fwrapper = GGCMFortbinFileWrapper(filename)
return h5py.File(filename, 'r')
else:
raise NotImplementedError()
# assert (self._fwrapper.filename == filename or
# glob2(self._fwrapper.filename) == glob2(filename))
# return self._fwrapper
def set_file_wrapper(self, wrapper):
raise NotImplementedError("This must be done at file init")
def load(self, fname):
if isinstance(fname, list):
self._collection = fname
else:
self._collection = [fname]
fname0 = self._collection[0]
fname1 = self.collective_name(fname)
basename = os.path.basename(fname0)
self.set_info('run', re.match(self._detector, basename).group(1))
self.set_info('fieldtype', re.match(self._detector, basename).group(2))
super(GkeyllFile, self).load(fname1)
def _parse(self):
if len(self._collection) == 1:
# load a single file
if self._crds is None:
self._crds = self.make_crds(self.fname)
_grid = self._parse_file(self.fname, self)
self.add(_grid)
self.activate(0)
else:
# load each file, and add it to the bucket
if self._crds is None:
self._crds = self.make_crds(self._collection[0])
data_temporal = self._make_dataset(self, dset_type="temporal",
name="GkeyllTemporalCollection")
self._fld_templates = self._make_template(self._collection[0])
for fname in self._collection:
f = self._load_child_file(fname, index_handle=False,
file_type=type(self),
crds=self._crds,
fld_templates=self._fld_templates)
data_temporal.add(f)
data_temporal.activate(0)
self.add(data_temporal)
self.activate(0)
def _parse_file(self, filename, parent_node):
# we do minimal file parsing here for performance. we just
# make data wrappers from the templates we got from the first
# file in the group, and package them up into grids
# frame = int(re.match(self._detector, filename).group(3))
_grid = self._make_grid(parent_node, name="<GkeyllGrid>",
**self._grid_opts)
# FIXME: To get the time at load, we have to open all hdf5 files
# which defeats the purpose of making templates etc. in attempt to
# be lazy. Maybe there's a way to use frame above?
with h5py.File(filename, 'r') as f:
step = f['timeData'].attrs['vsStep']
time = f['timeData'].attrs['vsTime']
self.set_info("step", step)
self.time = time
_grid.time = time
_grid.set_crds(self._crds)
if self._fld_templates is None:
self._fld_templates = self._make_template(filename)
for i, meta in enumerate(self._fld_templates):
# FIXME: the transpose goes xyz -> zyx
h5_data = H5pyDataWrapper(self.fname, "StructGridField",
comp_dim=-1, comp_idx=i)
fld = field.wrap_field(h5_data, self._crds, meta['fld_name'],
center="cell", pretty_name=meta['pretty_name'])
_grid.add_field(fld)
return _grid
def _make_template(self, filename):
""""""
with self.get_file_wrapper(filename) as f:
shape = f["StructGridField"].shape
sshape = shape[:-1]
nr_fields = shape[-1]
try:
type_info = _type_info[nr_fields]
except KeyError:
try:
                    # if the two scalar correction potentials are stored;
                    # copy the name lists so the shared _type_info entries
                    # are not mutated by the appends below
                    type_info = dict(_type_info[nr_fields - 2])
                    type_info['names'] = list(type_info['names'])
                    type_info['pretty_names'] = list(type_info['pretty_names'])
                    type_info['names'].append('EXTRA1')
                    type_info['pretty_names'].append('EXTRA1')
                    type_info['names'].append('EXTRA2')
                    type_info['pretty_names'].append('EXTRA2')
except KeyError:
raise RuntimeError("Could not desipher type (hydro, 5m, 10m)")
template = []
# TODO: use nr_fields to figure out the names of the fields?
for i in range(nr_fields):
d = dict(fldnum=i, shape=sshape,
fld_name=type_info['names'][i],
pretty_name=type_info['pretty_names'][i])
template.append(d)
self.set_info("field_type", type_info['field_type'])
return template
@staticmethod
def _get_single_crd(h5file, idx, nr_crds):
gridType = h5file['StructGrid'].attrs['vsKind'].decode()
if gridType in ['uniform']:
if idx >= len(h5file['StructGrid'].attrs['vsNumCells']):
raise IndexError()
lower = h5file['StructGrid'].attrs['vsLowerBounds'][idx]
upper = h5file['StructGrid'].attrs['vsUpperBounds'][idx]
num = h5file['StructGrid'].attrs['vsNumCells'][idx]
return [lower, upper, num + 1]
elif gridType in ['rectilinear']:
crd_arr = h5file['StructGrid/axis%d'%idx][:]
return crd_arr
elif gridType in ['structured']:
if idx == 0:
crd_arr = h5file['StructGrid'][:, 0, 0, 0]
elif idx == 1:
crd_arr = h5file['StructGrid'][0, :, 0, 1]
elif idx == 2:
crd_arr = h5file['StructGrid'][0, 0, :, 2]
return crd_arr
else:
raise RuntimeError("Gkeyll StructGrid.vsKind not understood: {0}"
"".format(repr(gridType)))
def make_crds(self, fname):
with h5py.File(fname, 'r') as f:
clist = []
# FIXME: xyz
crds = "xyz"
nr_crds = len(f['StructGridField'].shape) - 1
for i in range(nr_crds):
try:
clist.append((crds[i], self._get_single_crd(f, i, nr_crds)))
except IndexError:
pass
if f['StructGrid'].attrs['vsKind'].decode() in ['uniform']:
# FIXME: this goes xyz -> zyx
crds = wrap_crds("uniform_cartesian", clist)
else:
crds = wrap_crds("nonuniform_cartesian", clist)
return crds
##
## EOF
##
```
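Loading a frame resolves the component names by count (5 for hydro, 16 for five-moment, 26 for ten-moment), and `GkeyllGrid` derives quantities such as `ux_e` or `Pxx_i` on demand. The file name in this sketch is hypothetical and h5py is required.
```python
# Sketch; "two_fluid_q_5.h5" stands in for a real Gkeyll frame (needs h5py).
import viscid

f = viscid.load_file("two_fluid_q_5.h5")   # component count selects the name table
for fld in f.iter_fields():
    print(fld.name, fld.pretty_name)
```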
#### File: viscid/readers/__init__.py
```python
import viscid
# import vfile
from viscid.readers.vfile import VFile
from viscid.readers import vfile_bucket
# these imports are necessary to register file types
from viscid.readers import xdmf
from viscid.readers import hdf5
from viscid.readers import numpy_binary
from viscid.readers import ascii
# these imports register convenience readers for data from
# specific sim packages
from viscid.readers import ggcm_xdmf
from viscid.readers import ggcm_fortbin
from viscid.readers import psc
from viscid.readers import gkeyll
from viscid.readers import athena_bin
from viscid.readers import athena_tab
from viscid.readers import athena_hst
from viscid.readers import athena_xdmf
from viscid.readers import ggcm_jrrle
__all__ = ['load_file', 'load_files', 'unload_file', 'unload_all_files',
'reload_file', 'get_file', 'save_grid', 'save_field', 'save_fields']
__filebucket__ = vfile_bucket.VFileBucket()
def load(fnames):
"""Generic load
Dispatches to :meth:`load_file` or :meth:`load_files`. This
function is deprecated.
Parameters:
fnames: One or many file names
Returns:
one or many VFiles
"""
# this is not a deprecated warning since by default those aren't shown
# and i want this to be a loud and clear do not use :)
viscid.logger.warn("readers.load is deprecated in favor of load_file or "
"load_files")
files = load_files(fnames)
if isinstance(fnames, (list, tuple)):
return files
else:
return files[0]
def load_file(fname, force_reload=False, **kwargs):
"""Load a file
Parameters:
        fname (str): a single file name, relative to CWD; glob
            patterns and slices are accepted, see
            :doc:`/tips_and_tricks` for more info.
force_reload (bool): Force reload if file is already in memory
**kwargs: passed to the VFile constructor
See Also:
* :doc:`/tips_and_tricks`
Returns:
A VFile instance
"""
return __filebucket__.load_file(fname, force_reload=force_reload, **kwargs)
def load_files(fnames, force_reload=False, **kwargs):
"""Load a list of files
Parameters:
fnames (list): list of file names. Glob patterns and slices are
accepted, see :doc:`/tips_and_tricks` for more info.
force_reload (bool): Force reload if file is already in memory
**kwargs: passed to the VFile constructor
See Also:
* :doc:`/tips_and_tricks`
Returns:
A list of VFile instances. The length may not be the same
as the length of fnames, and the order may not be the same
in order to accommodate globs and file grouping.
"""
return __filebucket__.load_files(fnames, force_reload=force_reload,
**kwargs)
def unload_file(handle):
"""call unload on the handle in the bucket"""
__filebucket__[handle].unload()
def reload_file(handle):
"""call reload on the handle in the bucket"""
__filebucket__[handle].reload()
def get_file(handle):
""" return a file that's already been loaded by either
    number (as in nth file loaded), or file name
"""
return __filebucket__[handle]
def save_grid(fname, grd, **kwargs):
""" save a grid, filetype is inferred from fname
"""
ftype = VFile.detect_type(fname)
ftype.save_grid(fname, grd, **kwargs)
def save_field(fname, fld, **kwargs):
""" save a field, filetype is inferred from fname"""
ftype = VFile.detect_type(fname)
ftype.save_field(fname, fld, **kwargs)
def save_fields(fname, flds, **kwargs):
""" save a list of fields, filetype is inferred from fname
"""
ftype = VFile.detect_type(fname, mode='w')
ftype.save_fields(fname, flds, **kwargs)
def unload_all_files():
"""Hammer-of-Thor the cache"""
__filebucket__.remove_all_items()
```
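The bucket-backed helpers above are the intended entry points; a short sketch with hypothetical file names:
```python
# Sketch of the module-level API; the file names are hypothetical.
from viscid import readers

f = readers.load_file("run.3df.004200.xdmf")    # parsed once, cached in the bucket
same = readers.get_file("run.3df.004200.xdmf")  # fetch by handle without re-parsing
fs = readers.load_files("run.3df.*.xdmf")       # globs, grouped per reader class
readers.unload_all_files()                       # drop everything in the cache
```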
#### File: viscid/readers/numpy_binary.py
```python
from __future__ import print_function
import os
import numpy as np
from viscid import logger
from viscid.readers import vfile
from viscid import coordinate
class NPZDataWrapper(vfile.DataWrapper):
""" """
fname = None
loc = None
_shape = None
_dtype = None
def __init__(self, fname, loc):
super(NPZDataWrapper, self).__init__()
self.fname = fname
self.loc = loc
def _read_info(self):
# this takes super long when reading 3 hrs worth of ggcm data
# over sshfs
# import pdb; pdb.set_trace()
try:
with np.load(self.fname) as f:
dset = f[self.loc]
self._shape = dset.shape
self._dtype = dset.dtype
except IOError:
logger.error("Problem opening npz file, '%s'", self.fname)
raise
@property
def shape(self):
""" only ask for this if you really need it; can be a speed problem
for large temporal datasets over sshfs """
if self._shape is None:
self._read_info()
return self._shape
@property
def dtype(self):
""" only ask for this if you really need it; can be a speed problem
for large temporal datasets over sshfs """
if self._dtype is None:
self._read_info()
return self._dtype
def wrap_func(self, func_name, *args, **kwargs):
with np.load(self.fname) as f:
return getattr(f[self.loc], func_name)(*args, **kwargs)
def __array__(self, *args, **kwargs):
return self.wrap_func("__array__", *args, **kwargs)
def read_direct(self, *args, **kwargs):
raise NotImplementedError()
def len(self):
return self.wrap_func("len")
def __getitem__(self, item):
return self.wrap_func("__getitem__", item)
class FileNumpyNPZ(vfile.VFile):
""" open an ascii file with viscid format, or if not specified, assume
it's in gnuplot format, gnuplot format not yet specified """
_detector = r".*\.(npz)\s*$"
_KEY_CRDS = "crd_names"
_KEY_FLDS = {"node": "field_names_nc",
"cell": "field_names_cc",
"face": "field_names_fc",
"edge": "field_names_ec"}
def __init__(self, fname, **kwargs):
super(FileNumpyNPZ, self).__init__(fname, **kwargs)
def _wrap_lazy_field(self, parent_node, file_name, fld_name, crds, center):
lazy_arr = NPZDataWrapper(file_name, fld_name)
if len(lazy_arr.shape) == crds.nr_dims:
fldtype = "Scalar"
elif len(lazy_arr.shape) == crds.nr_dims + 1:
fldtype = "Vector"
else:
raise IOError("can't infer field type")
return self._make_field(parent_node, fldtype, fld_name, crds, lazy_arr,
center=center)
def _parse(self):
g = self._make_grid(self, **self._grid_opts)
with np.load(self.fname) as f:
            fld_names = list(f.keys())  # list() so .remove() works on Python 3
crd_names = []
# try to get crds names from an array of strings called _KEY_CRDS
# else, assume it's x, y, z and see if that works
try:
clist = [(ax, f[ax]) for ax in f[self._KEY_CRDS]]
crd_names = f[self._KEY_CRDS]
fld_names.remove(self._KEY_CRDS)
except KeyError:
for axisname in "xyz":
if axisname in f:
crd_names.append(axisname)
clist = [(cn, NPZDataWrapper(self.fname, cn)) for cn in crd_names]
crds = coordinate.wrap_crds("nonuniform_cartesian", clist)
g.set_crds(crds)
for c in clist:
# we should be sure by now that the keys exist
fld_names.remove(c[0])
# try to get field names from arrays of nc, cc, ec, fc
# fields
for fld_center, names_key in self._KEY_FLDS.items():
try:
names = f[names_key]
fld_names.remove(names_key)
except KeyError:
names = []
for name in names:
fld = self._wrap_lazy_field(g, self.fname, name, crds,
fld_center)
g.add_field(fld)
fld_names.remove(name)
# load any remaining fields as though they were node centered
for name in fld_names:
fld = self._wrap_lazy_field(g, self.fname, name, crds, "Node")
g.add_field(fld)
self.add(g)
self.activate(0)
def save(self, fname=None, **kwargs):
if fname is None:
fname = self.fname
flds = list(self.iter_fields())
self.save_fields(fname, flds)
@classmethod
def save_fields(cls, fname, flds, **kwargs):
assert len(flds) > 0
fname = os.path.expanduser(os.path.expandvars(fname))
fld_dict = {}
# setup crds
# FIXME: all coordinates are saved as non-uniform, the proper
        # way to do this is to let the coordinate object format its own
# hdf5 / xdmf / numpy binary output
clist = flds[0].crds.get_clist(full_arrays=True)
axis_names = []
for axis_name, crdarr in clist:
fld_dict[axis_name] = crdarr
axis_names.append(axis_name)
fld_dict[cls._KEY_CRDS] = np.array(axis_names)
# setup fields
# dict comprehension invalid in Python 2.6
# fld_names = {key.lower(): [] for key in cls._KEY_FLDS.keys()}
fld_names = {}
for key in cls._KEY_FLDS.keys():
fld_names[key.lower()] = []
for fld in flds:
fld_names[fld.center.lower()].append(fld.name)
fld_dict[fld.name] = fld.data
for center, names_lst in fld_names.items():
fld_dict[cls._KEY_FLDS[center.lower()]] = np.array(names_lst)
if fname.endswith(".npz"):
fname = fname[:-4]
np.savez(fname, **fld_dict)
##
## EOF
##
```
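A round trip through the `.npz` writer/reader might look like the sketch below; the XDMF source file is hypothetical, and `save_fields` dispatches to `FileNumpyNPZ.save_fields` via the `.npz` detector.
```python
# Sketch of an .npz round trip; "run.3df.004200.xdmf" is a hypothetical source.
import viscid
from viscid import readers

src = viscid.load_file("run.3df.004200.xdmf")
readers.save_fields("snapshot.npz", list(src.iter_fields()))  # writes crds + name lists
dst = viscid.load_file("snapshot.npz")     # each array wrapped lazily (NPZDataWrapper)
for fld in dst.iter_fields():
    print(fld.name, fld.center)
```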
#### File: viscid/readers/xdmf.py
```python
from __future__ import print_function
import os
import sys
# from xml.etree import ElementTree
import numpy as np
from viscid.compat import element_tree
from viscid import logger
from viscid.readers.vfile_bucket import ContainerFile
from viscid.readers.hdf5 import FileLazyHDF5
from viscid import amr_grid
from viscid import coordinate
# class XDMFDataItem(data_item.DataItem):
# def set_precision():
# nptype = np.dtype({'Float': 'float', 'Int': 'int', 'UInt': 'unit',
# 'Char': 'int', 'UChar': 'int'}[numbertype] + str(8*precision))
class FileXDMF(ContainerFile): # pylint: disable=abstract-method
""" on init, parse an xdmf file into datasets, grids, and fields """
_detector = r".*\.(xmf|xdmf)\s*$"
_xdmf_defaults = {
"Attribute": {
"Name": None,
"AttributeType": "Scalar", # Vector,Tensor,Tensor6,Matrix,GlobalID
"Center": "node" # cell,Grid,face,edge
},
"DataItem": {
"Name": None,
"ItemType": "Uniform", # Collection, tree,
                                   # HyperSlab, Coordinates, Function
"Dimensions": None, # KJI order
"NumberType": "Float", # Int, UInt, Char, UChar
"Precision": 4, # 1, 4, 8
"Format": "XML", # HDF, Binary
"Endian": "Native", # Big, Little
"Compression": "Raw", # Zlib, Bzip2
"Seek": 0
},
"Domain": {
"Name": None
},
"Geometry": {
"GeometryType": "XYZ" # XY, X_Y_Z, VxVyVz, Origin_DxDyDz
},
"Grid": {
"Name": None,
"GridType": "Uniform", # Collection,Tree,Subset
"CollectionType": "Spatial", # Temporal
"Section": "DataItem" # All
},
"Information": {
"Name": None,
"Value": None
},
"Xdmf": {
"Version": None
},
"Topology": {
"Name": None,
"TopologyType": None, # Polyvertex | Polyline | Polygon |
# Triangle | Quadrilateral | Tetrahedron |
                                  # Pyramid | Wedge | Hexahedron | Edge_3 |
                                  # Triangle_6 | Quadrilateral_8 |
# Tetrahedron_10 | Pyramid_13 | Wedge_15 |
# Hexahedron_20 | Mixed |
# 2DSMesh | 2DRectMesh | 2DCoRectMesh |
# 3DSMesh | 3DRectMesh | 3DCoRectMesh
"NodesPerElement": None,
"NumberOfElement": None,
"Dimensions": None,
"Order": None,
"BaseOffset": 0
},
"Time": {
"Type": "Single",
"Value": None
}
}
h5_root_dir = None
_last_amr_skeleton = None # experimental, should be moved
# tree = None
def __init__(self, fname, h5_root_dir=None, **kwargs):
"""XDMF File"""
if h5_root_dir is not None:
h5_root_dir = os.path.expandvars(h5_root_dir)
self.h5_root_dir = os.path.expanduser(h5_root_dir)
super(FileXDMF, self).__init__(fname, **kwargs)
def _parse(self):
grids = self._parse_file(self.fname, self)
for grid in grids:
self.add(grid)
if len(self.children) > 0:
self.activate(0)
def _parse_file(self, fname, parent_node):
# lxml has better xpath support, so it's preferred, but it stops
# if an xinclude doesn't exist, so for now use our custom extension
# of the default python xml lib
# if HAS_LXML:
# # sweet, you have it... use the better xml library
# tree = etree.parse(self.fname) # pylint: disable=E0602
# tree.xinclude() # TODO: gracefully ignore include problems
# root = tree.getroot()
grids = []
tree = element_tree.parse(fname)
element_tree.xinclude(tree, base_url=fname)
root = tree.getroot()
# search for all root grids, and parse them
domain_grids = root.findall("./Domain/Grid")
for dg in domain_grids:
grd = self._parse_grid(dg, parent_node)
grids.append(grd)
return grids
def _fill_attrs(self, el):
defs = self._xdmf_defaults[el.tag]
ret = {}
for opt, defval in defs.items():
if defval is None:
ret[opt] = el.get(opt)
else:
ret[opt] = type(defval)(el.get(opt, defval))
return ret
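    # For illustration (hypothetical element): a tag like
    #   <DataItem Dimensions="4 4 4" Format="HDF">...</DataItem>
    # comes back from _fill_attrs() with the explicit Dimensions/Format values
    # plus NumberType="Float", Precision=4, Seek=0, etc. taken from
    # _xdmf_defaults, each default coerced with its declared type.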
def _parse_grid(self, el, parent_node=None, time=None):
attrs = self._fill_attrs(el)
grd = None
crds = None
# parse topology, or cascade parent grid's topology
topology = el.find("./Topology")
topoattrs = None
if topology is not None:
topoattrs = self._fill_attrs(topology)
elif parent_node and parent_node.topology_info:
topoattrs = parent_node.topology_info
# parse geometry, or cascade parent grid's geometry
geometry = el.find("./Geometry")
geoattrs = None
if geometry is not None:
crds, geoattrs = self._parse_geometry(geometry, topoattrs)
elif parent_node and parent_node.geometry_info:
geoattrs = parent_node.geometry_info
crds = parent_node.crds # this can be None and that's ok
# parse time
if time is None:
t = el.find("./Time")
if t is not None:
pt, tattrs = self._parse_time(t)
if tattrs["Type"] == "Single":
time = pt
# cascade a parent grid's time
if time is None and parent_node and parent_node.time is not None:
time = parent_node.time
gt = attrs["GridType"]
if gt == "Collection":
times = None
ct = attrs["CollectionType"]
if ct == "Temporal":
grd = self._make_dataset(parent_node, dset_type="temporal",
name=attrs["Name"])
self._inject_info(el, grd)
ttag = el.find("./Time")
if ttag is not None:
times, tattrs = self._parse_time(ttag)
elif ct == "Spatial":
grd = self._make_dataset(parent_node, name=attrs["Name"])
self._inject_info(el, grd)
else:
logger.warn("Unknown collection type %s, ignoring grid", ct)
for i, subgrid in enumerate(el.findall("./Grid")):
t = times[i] if (times is not None and i < len(times)) else time
# print(subgrid, grd, t)
self._parse_grid(subgrid, parent_node=grd, time=time)
if len(grd.children) > 0:
grd.activate(0)
elif gt == "Uniform":
if not (topoattrs and geoattrs):
logger.warn("Xdmf Uniform grids must have "
"topology / geometry.")
else:
grd = self._make_grid(parent_node, name=attrs["Name"],
**self._grid_opts)
self._inject_info(el, grd)
for attribute in el.findall("./Attribute"):
fld = self._parse_attribute(grd, attribute, crds,
topoattrs, time)
if time:
fld.time = time
grd.add_field(fld)
elif gt == "Tree":
logger.warn("Xdmf Tree Grids not implemented, ignoring "
"this grid")
elif gt == "Subset":
logger.warn("Xdmf Subset Grids not implemented, ignoring "
"this grid")
else:
logger.warn("Unknown grid type %s, ignoring this grid", gt)
# fill attributes / data items
# if grid and gt == "Uniform":
# for a in el.findall("./Attribute"):
# fld = self._parse_attribute(a)
# grid.add_field(fld)
if grd:
if time is not None:
grd.time = time
if topoattrs is not None:
grd.topology_info = topoattrs
if geoattrs is not None:
grd.geometry_info = geoattrs
if crds is not None:
grd.set_crds(crds)
# EXPERIMENTAL AMR support, _last_amr_grid shouldn't be an attribute
# of self, since that will only remember the most recently generated
# amr grid, but that's ok for now
# if gt == "Uniform":
# print(">!", crds._TYPE, crds.xl_nc, grd.time)
# print(">!?", type(parent_node), parent_node.children._ordered,
# len(parent_node.children))
if gt == "Collection" and ct == "Spatial":
grd, is_amr = amr_grid.dataset_to_amr_grid(grd,
self._last_amr_skeleton)
if is_amr:
self._last_amr_skeleton = grd.skeleton
if parent_node is not None:
parent_node.add(grd)
return grd # can be None
def _parse_geometry(self, geo, topoattrs):
""" geo is the element tree item, returns Coordinate object and
xml attributes """
geoattrs = self._fill_attrs(geo)
# crds = None
crdlist = None
crdtype = None
crdkwargs = {}
topotype = topoattrs["TopologyType"]
# parse geometry into crds
geotype = geoattrs["GeometryType"]
if geotype.upper() == "XYZ":
data, attrs = self._parse_dataitem(geo.find("./DataItem"),
keep_flat=True)
# x = data[0::3]
# y = data[1::3]
# z = data[2::3]
# crdlist = (('z', z), ('y', y), ('x', x))
# quietly do nothing... we don't support unstructured grids
# or 3d spherical yet, and 2d spherical can be figured out
# if we assume the grid spans the whole sphere
crdlist = None
elif geotype.upper() == "XY":
data, attrs = self._parse_dataitem(geo.find("./DataItem"),
keep_flat=True)
# x = data[0::2]
# y = data[1::2]
# z = np.zeros(len(x))
# crdlist = (('z', z), ('y', y), ('x', x))
# quietly do nothing... we don't support unstructured grids
# or 3d spherical yet, and 2d spherical can be figured out
# if we assume the grid spans the whole sphere
crdlist = None
elif geotype.upper() == "X_Y_Z":
crdlookup = {'x': 0, 'y': 1, 'z': 2}
crdlist = [['x', None], ['y', None], ['z', None]]
# can't use ./DataItem[@Name='X'] so python2.6 works
dataitems = geo.findall("./DataItem")
for di in dataitems:
crd_name = di.attrib["Name"].lower()
data, attrs = self._parse_dataitem(di, keep_flat=True)
crdlist[crdlookup.pop(crd_name)][1] = data
if len(crdlookup) > 0:
raise RuntimeError("XDMF format error: Coords not specified "
"for {0} dimesions"
"".format(list(crdlookup.keys())))
crdkwargs["full_arrays"] = True
elif geotype.upper() == "VXVYVZ":
crdlookup = {'x': 0, 'y': 1, 'z': 2}
crdlist = [['x', None], ['y', None], ['z', None]]
# can't use ./DataItem[@Name='VX'] so python2.6 works
dataitems = geo.findall("./DataItem")
for di in dataitems:
crd_name = di.attrib["Name"].lstrip('V').lower()
data, attrs = self._parse_dataitem(di, keep_flat=True)
crdlist[crdlookup.pop(crd_name)][1] = data
if len(crdlookup) > 0:
raise RuntimeError("XDMF format error: Coords not specified "
"for {0} dimesions"
"".format(list(crdlookup.keys())))
crdkwargs["full_arrays"] = True
elif geotype.upper() == "ORIGIN_DXDYDZ":
# this is for grids with uniform spacing
dataitems = geo.findall("./DataItem")
data_o, _ = self._parse_dataitem(dataitems[0])
data_dx, _ = self._parse_dataitem(dataitems[1])
dtyp = data_o.dtype
nstr = None
if topoattrs["Dimensions"]:
nstr = topoattrs["Dimensions"]
elif topoattrs["NumberOfElements"]:
nstr = topoattrs["NumberOfElements"]
else:
raise ValueError("ORIGIN_DXDYDZ has no number of elements...")
n = [int(num) for num in nstr.split()]
# FIXME: OpenGGCM output uses ZYX ordering even though the xdmf
# website says it should be XYZ, BUT, the file opens correctly
# in Paraview with zyx, so... I guess i need to do this [::-1]
# nonsense here
data_o, data_dx, n = data_o[::-1], data_dx[::-1], n[::-1]
crdlist = [None] * 3
for i, crd in enumerate(['x', 'y', 'z']):
n_nc, n_cc = n[i], n[i] - 1
crd_arr = [data_o[i], data_o[i] + (n_cc * data_dx[i]), n_nc]
crdlist[i] = (crd, crd_arr)
crdkwargs["dtype"] = dtyp
crdkwargs["full_arrays"] = False
else:
logger.warn("Invalid GeometryType: %s", geotype)
if topotype in ['3DCoRectMesh', '2DCoRectMesh']:
crdtype = "uniform_cartesian"
elif topotype in ['3DRectMesh', '2DRectMesh']:
if crdkwargs.get("full_arrays", True):
crdtype = "nonuniform_cartesian"
else: # HACK, hopefully not used ever
crdtype = "uniform_cartesian"
elif topotype in ['2DSMesh']:
crdtype = "uniform_spherical" # HACK!
######## this doesn't quite work, but it's too heavy to be useful
######## anyway... if we assume a 2d spherical grid spans the
######## whole sphere, and radius doesnt matter, all we need are
######## the nr_phis / nr_thetas, so let's just do that
# # this asserts that attrs["Dimensions"] will have the xyz
# # dimensions
# # turn x, y, z -> phi, theta, r
# dims = [int(s) for
# s in reversed(topoattrs["Dimensions"].split(' '))]
# dims = [1] * (3 - len(dims)) + dims
# nr, ntheta, nphi = [d for d in dims]
# # dtype = crdlist[0][1].dtype
# # phi, theta, r = [np.empty((n,), dtype=dtype) for n in dims]
# x, y, z = (crdlist[i][1].reshape(dims) for i in range(3))
# nphitheta = nphi * ntheta
# r = np.sqrt(x[::nphitheta, 0, 0]**2 + y[::nphitheta, 0, 0]**2 +
# z[::nphitheta, 0, 0]**2)
# ir = nr // 2 # things get squirrly near the extrema
# theta = (180.0 / np.pi) * \
# (np.arccos(z[ir, :, ::nphi] / r[ir]).reshape(-1))
# itheta = ntheta // 2
# phi = (180.0 / np.pi) * \
# np.arctan2(y[ir, itheta, :], x[ir, itheta, :])
# print(dims, nr, ntheta, nphi)
# print("r:", r.shape, r)
# print("theta:", theta.shape, theta)
# print("phi:", phi.shape, phi)
# raise RuntimeError()
######## general names in spherical crds
# ntheta, nphi = [int(s) for s in topoattrs["Dimensions"].split(' ')]
# crdlist = [['theta', [0.0, 180.0, ntheta]],
# ['phi', [0.0, 360.0, nphi]]]
######## names on a map
ntheta, nphi = [int(s) for s in topoattrs["Dimensions"].split(' ')]
crdlist = [['phi', [0.0, 360.0, nphi]],
['theta', [0.0, 180.0, ntheta]]]
crdkwargs["full_arrays"] = False
crdkwargs["units"] = 'deg'
elif topotype in ['3DSMesh']:
raise NotImplementedError("3D spherical grids not yet supported")
else:
raise NotImplementedError("Unstructured grids not yet supported")
crds = coordinate.wrap_crds(crdtype, crdlist, **crdkwargs)
return crds, geoattrs
def _parse_attribute(self, parent_node, item, crds, topoattrs, time=0.0):
"""
Args:
parent_node (Dataset, Grid, or None): Hint what the parent will
be. Necessary if _make_field makes decisions based on the
info dict
"""
attrs = self._fill_attrs(item)
data, dataattrs = self._parse_dataitem(item.find("./DataItem"))
name = attrs["Name"]
center = attrs["Center"]
fldtype = attrs["AttributeType"]
fld = self._make_field(parent_node, fldtype, name, crds, data,
center=center, time=time, zyx_native=True)
self._inject_info(item, fld)
return fld
def _parse_dataitem(self, item, keep_flat=False):
""" returns the data as a numpy array, or HDF data item """
attrs = self._fill_attrs(item)
dimensions = attrs["Dimensions"]
if dimensions:
dimensions = [int(d) for d in dimensions.split(' ')]
numbertype = attrs["NumberType"]
precision = attrs["Precision"]
nptype = np.dtype({'Float': 'f', 'Int': 'i', 'UInt': 'u',
'Char': 'i', 'UChar': 'u'}[numbertype] + str(precision))
fmt = attrs["Format"]
if fmt == "XML":
arr = np.fromstring(item.text, sep=' ', dtype=nptype)
if dimensions and not keep_flat:
arr = arr.reshape(dimensions)
return arr, attrs
if fmt == "HDF":
fname, loc = item.text.strip().split(':')
# FIXME: startswith '/' is unix path name specific
if self.h5_root_dir is not None:
fname = os.path.join(self.h5_root_dir, fname)
elif not fname.startswith('/'):
fname = os.path.join(self.dirname, fname)
h5file = self._load_child_file(fname, index_handle=False,
file_type=FileLazyHDF5)
arr = h5file.get_data(loc)
return arr, attrs
if fmt == "Binary":
raise NotImplementedError("binary xdmf data not implemented")
logger.warn("Invalid DataItem Format.")
return (None, None)
def _parse_time(self, timetag):
""" returns the time(s) as float, or numpy array, time attributes"""
attrs = self._fill_attrs(timetag)
timetype = attrs["Type"]
if timetype == 'Single':
return float(timetag.get('Value')), attrs
elif timetype == 'List':
return self._parse_dataitem(timetag.find('.//DataItem'))[0], attrs
elif timetype == 'Range':
raise NotImplementedError("Time Range not yet implemented")
# dat, dattrs = self._parse_dataitem(timetag.find('.//DataItem'))
# TODO: this is not the most general, but I think it'll work
# as a first stab, plus I will probably not need Range ever
# tgridtag = timetag.find("ancestor::Grid[@GridType='Collection']"
# "[@CollectionType='Temporal'][1]"))
# n = len(tgridtag.find(.//Grid[@GridType='Collection']
# [CollectionType=['Spatial']]))
# return np.linspace(dat[0], dat[1], n)
# return np.arange(dat[0], dat[1])
elif timetype == 'HyperSlab':
dat, dattrs = self._parse_dataitem(timetag.find('.//DataItem'))
arr = np.array([dat[0] + i * dat[1] for i in range(int(dat[2]))])
return arr, attrs
else:
logger.warn("invalid TimeType.\n")
def _parse_information(self, information):
""" parse generic information tag """
attrs = self._fill_attrs(information)
name = attrs["Name"]
val = attrs["Value"]
if val is None:
_di = information.find(".//DataItem")
if _di:
val, _ = self._parse_dataitem(_di)
else:
val = information.text
return name, val
def _inject_info(self, el, target):
for _, information in enumerate(el.findall("./Information")):
_name, _val = self._parse_information(information)
target.set_info(_name, _val)
def _main():
import viscid
f = FileXDMF(os.path.join(viscid.sample_dir, 'local_0001.py_0.xdmf'))
sys.stderr.write("{0}\n".format(f))
return 0
if __name__ == '__main__':
sys.exit(_main())
##
## EOF
##
``` |
{
"source": "JIMhackKING/seahub-1",
"score": 2
} |
#### File: api2/endpoints/dtable.py
```python
import os
import logging
import time
import jwt
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from rest_framework.response import Response
from django.utils.translation import ugettext as _
from pysearpc import SearpcError
from seaserv import seafile_api, ccnet_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.dtable.models import Workspaces, DTables
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.group.utils import group_id_to_name
from seahub.utils import is_valid_dirent_name, is_org_context, normalize_file_path, \
check_filename_with_rename, gen_file_upload_url
from seahub.settings import MAX_UPLOAD_FILE_NAME_LEN, DTABLE_PRIVATE_KEY
from seahub.dtable.utils import check_dtable_permission
from seahub.constants import PERMISSION_ADMIN, PERMISSION_READ_WRITE
logger = logging.getLogger(__name__)
FILE_TYPE = '.dtable'
WRITE_PERMISSION_TUPLE = (PERMISSION_READ_WRITE, PERMISSION_ADMIN)
class WorkspacesView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request):
"""get all workspaces
"""
username = request.user.username
org_id = -1
if is_org_context(request):
org_id = request.user.org.org_id
if org_id and org_id > 0:
groups = ccnet_api.get_org_groups_by_user(org_id, username)
else:
groups = ccnet_api.get_groups(username, return_ancestors=True)
owner_list = list()
owner_list.append(username)
for group in groups:
group_user = '%s@seafile_group' % group.id
owner_list.append(group_user)
workspace_list = list()
for owner in owner_list:
try:
workspace = Workspaces.objects.get_workspace_by_owner(owner)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not workspace:
if '@seafile_group' in owner:
continue
# permission check
if not request.user.permissions.can_add_repo():
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
if org_id and org_id > 0:
repo_id = seafile_api.create_org_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile",
org_id
)
else:
repo_id = seafile_api.create_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile"
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
workspace = Workspaces.objects.create_workspace(owner, repo_id)
# resource check
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
logger.warning('Library %s not found.' % repo_id)
continue
res = workspace.to_dict()
table_list = DTables.objects.get_dtable_by_workspace(workspace)
res["table_list"] = table_list
if '@seafile_group' in owner:
group_id = owner.split('@')[0]
res["owner_name"] = group_id_to_name(group_id)
res["owner_type"] = "Group"
else:
res["owner_name"] = email2nickname(owner)
res["owner_type"] = "Personal"
workspace_list.append(res)
return Response({"workspace_list": workspace_list}, status=status.HTTP_200_OK)
class DTablesView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def post(self, request):
"""create a table file
Permission:
1. owner
2. group member
"""
# argument check
table_owner = request.POST.get('owner')
if not table_owner:
error_msg = 'owner invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
table_name = request.POST.get('name')
if not table_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
table_file_name = table_name + FILE_TYPE
if not is_valid_dirent_name(table_file_name):
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# resource check
workspace = Workspaces.objects.get_workspace_by_owner(table_owner)
if not workspace:
if not request.user.permissions.can_add_repo():
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
org_id = -1
if is_org_context(request):
org_id = request.user.org.org_id
try:
if org_id and org_id > 0:
repo_id = seafile_api.create_org_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile",
org_id
)
else:
repo_id = seafile_api.create_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile"
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
workspace = Workspaces.objects.create_workspace(table_owner, repo_id)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not check_dtable_permission(username, workspace):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# repo status check
repo_status = repo.status
if repo_status != 0:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# create new empty table
table_file_name = check_filename_with_rename(repo_id, '/', table_file_name)
try:
seafile_api.post_empty_file(repo_id, '/', table_file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
dtable = DTables.objects.create_dtable(username, workspace, table_name)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({"table": dtable.to_dict()}, status=status.HTTP_201_CREATED)
class DTableView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def put(self, request, workspace_id):
"""rename a table
Permission:
1. owner
2. group member
"""
# argument check
old_table_name = request.data.get('old_name')
if not old_table_name:
error_msg = 'old_name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
new_table_name = request.data.get('new_name')
if not new_table_name:
error_msg = 'new_name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
new_table_file_name = new_table_name + FILE_TYPE
if not is_valid_dirent_name(new_table_file_name):
error_msg = 'new_name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if len(new_table_file_name) > MAX_UPLOAD_FILE_NAME_LEN:
error_msg = 'new_name is too long.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# resource check
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, old_table_name)
if not dtable:
error_msg = 'dtable %s not found.' % old_table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
old_table_file_name = old_table_name + FILE_TYPE
old_table_path = normalize_file_path(old_table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, old_table_path)
if not table_file_id:
error_msg = 'file %s not found.' % old_table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not check_dtable_permission(username, workspace):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# repo status check
repo_status = repo.status
if repo_status != 0:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# rename table
new_table_file_name = check_filename_with_rename(repo_id, '/', new_table_file_name)
try:
seafile_api.rename_file(repo_id, '/', old_table_file_name, new_table_file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
dtable.name = new_table_name
dtable.modifier = username
dtable.save()
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({"table": dtable.to_dict()}, status=status.HTTP_200_OK)
def delete(self, request, workspace_id):
"""delete a table
Permission:
1. owner
2. group member
"""
# argument check
table_name = request.data.get('name')
if not table_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
table_file_name = table_name + FILE_TYPE
# resource check
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
table_path = normalize_file_path(table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, table_path)
if not table_file_id:
error_msg = 'file %s not found.' % table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not check_dtable_permission(username, workspace):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# repo status check
repo_status = repo.status
if repo_status != 0:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# delete asset
asset_dir_path = '/asset/' + str(dtable.uuid)
asset_dir_id = seafile_api.get_dir_id_by_path(repo_id, asset_dir_path)
if asset_dir_id:
parent_dir = os.path.dirname(asset_dir_path)
file_name = os.path.basename(asset_dir_path)
try:
seafile_api.del_file(repo_id, parent_dir, file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
# delete table
try:
seafile_api.del_file(repo_id, '/', table_file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
DTables.objects.delete_dtable(workspace, table_name)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True}, status=status.HTTP_200_OK)
class DTableAssetUploadLinkView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, workspace_id):
"""get table file upload link
Permission:
1. owner
2. group member
3. shared user with `rw` or `admin` permission
"""
# argument check
table_name = request.GET.get('name', None)
if not table_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# resource check
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if check_dtable_permission(username, workspace, dtable) not in WRITE_PERMISSION_TUPLE:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
token = seafile_api.get_fileserver_access_token(repo_id, 'dummy', 'upload',
'', use_onetime=False)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
upload_link = gen_file_upload_url(token, 'upload-api')
# create asset dir
asset_dir_path = '/asset/' + str(dtable.uuid)
asset_dir_id = seafile_api.get_dir_id_by_path(repo_id, asset_dir_path)
if not asset_dir_id:
seafile_api.mkdir_with_parents(repo_id, '/', asset_dir_path[1:], username)
dtable.modifier = username
dtable.save()
res = dict()
res['upload_link'] = upload_link
res['parent_path'] = asset_dir_path
return Response(res)
class DTableAccessTokenView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, workspace_id, name):
"""get dtable access token
"""
table_name = name
table_file_name = table_name + FILE_TYPE
# resource check
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
table_path = normalize_file_path(table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, table_path)
if not table_file_id:
error_msg = 'file %s not found.' % table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not check_dtable_permission(username, workspace, dtable):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# generate json web token
payload = {
'exp': int(time.time()) + 86400 * 3,
'dtable_uuid': dtable.uuid.hex,
'username': username,
}
try:
access_token = jwt.encode(
payload, DTABLE_PRIVATE_KEY, algorithm='HS256'
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'access_token': access_token})
```
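The token returned by `DTableAccessTokenView` above is a plain JWT signed with `DTABLE_PRIVATE_KEY` using HS256. As a hedged illustration (not part of the seahub code itself), a service that shares that key could verify and unpack the token with PyJWT roughly as below; the key value and helper name are placeholders.

```python
import time

import jwt

# Hypothetical shared secret; in seahub this comes from settings.DTABLE_PRIVATE_KEY.
DTABLE_PRIVATE_KEY = 'replace-with-the-real-shared-secret'


def read_dtable_access_token(access_token):
    """Verify a dtable access token and return its payload.

    Raises jwt.ExpiredSignatureError / jwt.InvalidTokenError for bad tokens.
    """
    payload = jwt.decode(access_token, DTABLE_PRIVATE_KEY, algorithms=['HS256'])
    # Payload fields mirror what DTableAccessTokenView encodes above.
    return {
        'dtable_uuid': payload['dtable_uuid'],
        'username': payload['username'],
        'expires_in': payload['exp'] - int(time.time()),
    }
```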
#### File: api2/endpoints/dtable_related_users.py
```python
import logging
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from rest_framework.response import Response
import seaserv
from seaserv import seafile_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error, get_user_common_info
from seahub.dtable.models import Workspaces, DTables
from seahub.dtable.utils import check_dtable_permission, list_dtable_related_users
from seahub.utils import normalize_file_path
from seahub.api2.endpoints.dtable import FILE_TYPE
logger = logging.getLogger(__name__)
class DTableRelatedUsersView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, workspace_id, name):
"""list dtable related users
"""
table_name = name
table_file_name = table_name + FILE_TYPE
# resource check
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if '@seafile_group' in workspace.owner:
group_id = workspace.owner.split('@')[0]
group = seaserv.get_group(group_id)
if not group:
error_msg = 'Group %s not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
table_path = normalize_file_path(table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, table_path)
if not table_file_id:
error_msg = 'file %s not found.' % table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not check_dtable_permission(username, workspace, dtable):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# main
user_list = list()
try:
email_list = list_dtable_related_users(workspace, dtable)
for email in email_list:
user_info = get_user_common_info(email)
user_list.append(user_info)
except Exception as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
return Response({'user_list': user_list})
```
#### File: management/commands/send_file_updates.py
```python
from datetime import datetime
import logging
import os
import re
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse
from django.utils.html import escape as e
from django.utils import translation
from django.utils.translation import ugettext as _
from seahub.avatar.templatetags.avatar_tags import avatar
from seahub.avatar.util import get_default_avatar_url
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.constants import HASH_URLS
from seahub.options.models import (
UserOptions, KEY_FILE_UPDATES_EMAIL_INTERVAL,
KEY_FILE_UPDATES_LAST_EMAILED_TIME
)
from seahub.profile.models import Profile
from seahub.utils import (get_site_name, seafevents_api,
send_html_email, get_site_scheme_and_netloc)
from seahub.utils.timeutils import utc_to_local
# Get an instance of a logger
logger = logging.getLogger(__name__)
########## Utility Functions ##########
def td(con):
return con
# return '<td>%s</td>' % con
def a_tag(con, href='#', style=''):
return '<a href="%s" style="%s">%s</a>' % (href, style, e(con))
def repo_url(repo_id, repo_name):
p = reverse('lib_view', args=[repo_id, repo_name, ''])
return get_site_scheme_and_netloc() + p
def file_url(repo_id, file_path):
p = reverse('view_lib_file', args=[repo_id, file_path])
return get_site_scheme_and_netloc() + p
def dir_url(repo_id, repo_name, dir_path):
p = reverse('lib_view', args=[repo_id, repo_name, dir_path.strip('/')])
return get_site_scheme_and_netloc() + p
def user_info_url(username):
p = reverse('user_profile', args=[username])
return get_site_scheme_and_netloc() + p
#######################################
class Command(BaseCommand):
    help = ('Send Email notifications to user if he/she has '
            'file updates notices every period of seconds.')
label = "notifications_send_file_updates"
def handle(self, *args, **options):
logger.debug('Start sending file updates emails...')
self.do_action()
logger.debug('Finish sending file updates emails.\n')
def get_avatar(self, username, default_size=32):
img_tag = avatar(username, default_size)
pattern = r'src="(.*)"'
repl = r'src="%s\1"' % get_site_scheme_and_netloc()
return re.sub(pattern, repl, img_tag)
def get_avatar_src(self, username, default_size=32):
avatar_img = self.get_avatar(username, default_size)
m = re.search('<img src="(.*?)".*', avatar_img)
if m:
return m.group(1)
else:
return ''
def get_default_avatar(self, default_size=32):
# user default avatar
img_tag = """<img src="%s" width="%s" height="%s" class="avatar" alt="" />""" % \
(get_default_avatar_url(), default_size, default_size)
pattern = r'src="(.*)"'
repl = r'src="%s\1"' % get_site_scheme_and_netloc()
return re.sub(pattern, repl, img_tag)
def get_default_avatar_src(self, default_size=32):
avatar_img = self.get_default_avatar(default_size)
m = re.search('<img src="(.*?)".*', avatar_img)
if m:
return m.group(1)
else:
return ''
def get_user_language(self, username):
return Profile.objects.get_user_language(username)
def format_file_operation(self, ev):
lib_link = a_tag(ev.repo_name, repo_url(ev.repo_id, ev.repo_name))
small_lib_link = a_tag(ev.repo_name, repo_url(ev.repo_id, ev.repo_name), 'color:#868e96;font-size:87.5%;')
if ev.obj_type == 'repo':
if ev.op_type == 'create':
op = _('Created library')
details = td(lib_link)
elif ev.op_type == 'rename':
op = _('Renamed library')
details = td('%s => %s' % (e(ev.old_repo_name), lib_link))
elif ev.op_type == 'delete':
op = _('Deleted library')
details = td(e(ev.repo_name))
elif ev.op_type == 'recover':
op = _('Restored library')
details = td(lib_link)
else: # ev.op_type == 'clean-up-trash':
if ev.days == 0:
op = _('Removed all items from trash.')
else:
op = _('Removed items older than %s days from trash.' %
ev.days)
details = td(lib_link)
elif ev.obj_type == 'file':
file_name = os.path.basename(ev.path)
file_link = a_tag(file_name, file_url(ev.repo_id, ev.path))
if ev.op_type == 'create':
op = _('Created file')
details = td("%s<br />%s" % (file_link, small_lib_link))
elif ev.op_type == 'delete':
op = _('Deleted file')
details = td("%s<br />%s" % (e(file_name), small_lib_link))
elif ev.op_type == 'recover':
op = _('Restored file')
details = td("%s<br />%s" % (file_link, small_lib_link))
elif ev.op_type == 'rename':
op = _('Renamed file')
old_name = os.path.basename(ev.old_path)
details = td("%s => %s<br />%s" % (
e(old_name), file_link, small_lib_link)
)
elif ev.op_type == 'move':
op = _('Moved file')
file_path_link = a_tag(ev.path, file_url(ev.repo_id, ev.path))
details = td('%s => %s<br />%s' % (
e(ev.old_path), file_path_link, small_lib_link)
)
else: # ev.op_type == 'edit':
op = _('Updated file')
details = td("%s<br />%s" % (file_link, small_lib_link))
else: # dir
dir_name = os.path.basename(ev.path)
dir_link = a_tag(dir_name, dir_url(ev.repo_id, ev.repo_name, ev.path))
if ev.op_type == 'create':
op = _('Created folder')
details = td('%s<br />%s' % (dir_link, small_lib_link))
elif ev.op_type == 'delete':
op = _('Deleted folder')
details = td('%s<br />%s' % (e(dir_name), small_lib_link))
elif ev.op_type == 'recover':
op = _('Restored folder')
details = td('%s<br />%s' % (dir_link, small_lib_link))
elif ev.op_type == 'rename':
op = _('Renamed folder')
old_name = os.path.basename(ev.old_path)
details = td('%s => %s<br />%s' % (e(old_name), dir_link,
small_lib_link))
else: # ev.op_type == 'move':
op = _('Moved folder')
details = td('%s => %s<br />%s' % (e(ev.old_path), dir_link,
small_lib_link))
return (op, details)
def do_action(self):
today = datetime.utcnow().replace(hour=0).replace(minute=0).replace(
second=0).replace(microsecond=0)
emails = []
user_file_updates_email_intervals = []
for ele in UserOptions.objects.filter(
option_key=KEY_FILE_UPDATES_EMAIL_INTERVAL):
try:
user_file_updates_email_intervals.append(
(ele.email, int(ele.option_val))
)
emails.append(ele.email)
except Exception as e:
logger.error(e)
continue
user_last_emailed_time_dict = {}
for ele in UserOptions.objects.filter(
option_key=KEY_FILE_UPDATES_LAST_EMAILED_TIME).filter(
email__in=emails):
try:
user_last_emailed_time_dict[ele.email] = datetime.strptime(
ele.option_val, "%Y-%m-%d %H:%M:%S")
except Exception as e:
logger.error(e)
continue
for (username, interval_val) in user_file_updates_email_intervals:
# save current language
cur_language = translation.get_language()
# get and active user language
user_language = self.get_user_language(username)
translation.activate(user_language)
logger.debug('Set language code to %s for user: %s' % (
user_language, username))
self.stdout.write('[%s] Set language code to %s' % (
str(datetime.now()), user_language))
# get last_emailed_time if any, defaults to today
last_emailed_time = user_last_emailed_time_dict.get(username, today)
now = datetime.utcnow().replace(microsecond=0)
            if (now - last_emailed_time).total_seconds() < interval_val:
continue
# get file updates(from: last_emailed_time, to: now) for repos
# user can access
res = seafevents_api.get_user_activities_by_timestamp(
username, last_emailed_time, now)
if not res:
continue
# remove my activities
res = [x for x in res if x.op_user != username]
if not res:
continue
# format mail content & send file updates email to user
try:
for ele in res:
ele.user_avatar = self.get_avatar_src(ele.op_user)
ele.local_timestamp = utc_to_local(ele.timestamp)
ele.op_user_link = a_tag(email2nickname(ele.op_user),
user_info_url(ele.op_user))
ele.operation, ele.op_details = self.format_file_operation(ele)
except Exception as e:
logger.error('Failed to format mail content for user: %s' %
username)
logger.error(e, exc_info=True)
continue
nickname = email2nickname(username)
contact_email = Profile.objects.get_contact_email_by_user(username)
c = {
'name': nickname,
'updates_count': len(res),
'updates': res,
}
try:
send_html_email(_('New file updates on %s') % get_site_name(),
'notifications/file_updates_email.html', c,
None, [contact_email])
# set new last_emailed_time
UserOptions.objects.set_file_updates_last_emailed_time(
username, now)
except Exception as e:
logger.error('Failed to send email to %s, error detail: %s' %
(contact_email, e))
self.stderr.write('[%s] Failed to send email to %s, error '
'detail: %s' % (str(now), contact_email, e))
finally:
# reset lang
translation.activate(cur_language)
```
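Since this is a Django management command (its module lives under `management/commands/send_file_updates.py`), it can be triggered from a scheduler or from Python code. A minimal, hypothetical invocation via Django's `call_command` is sketched below; the settings module path is an assumption and would need to match the actual deployment.

```python
import os

import django
from django.core.management import call_command

# Assumed settings module; adjust to the real project settings path.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seahub.settings')
django.setup()

# Runs the management command defined above; in production this is usually
# scheduled (e.g. via cron) rather than called from Python.
call_command('send_file_updates')
```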
#### File: api/endpoints/test_dtable_api_token.py
```python
import json
from django.core.urlresolvers import reverse
from seaserv import seafile_api, ccnet_api
from tests.common.utils import randstring
from seahub.dtable.models import Workspaces
from seahub.test_utils import BaseTestCase
from seahub.group.utils import is_group_admin_or_owner
class DTableAPITokensTest(BaseTestCase):
def setUp(self):
# create workspace
self.workspace = Workspaces.objects.create_workspace(
self.user.username,
self.repo.id
)
self.group_id = self.group.id
self.login_as(self.user)
# create dtable
self.dtable_url = reverse('api-v2.1-dtables')
resp = self.client.post(self.dtable_url, {'name': 'table6', 'owner': self.user.username})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp["table"]["name"] == 'table6'
# url
self.api_tokens_url = reverse('api-v2.1-dtable-api-tokens', args=[self.workspace.id, 'table6'])
def tearDown(self):
assert len(Workspaces.objects.all()) == 1
workspace = Workspaces.objects.get_workspace_by_owner(self.user.username)
workspace_id = workspace.id
Workspaces.objects.delete_workspace(workspace_id)
self.remove_repo()
def test_can_create(self):
resp = self.client.post(self.api_tokens_url, {'app_name': 'mail_client', 'permission': 'rw'})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['api_token']
assert json_resp['app_name'] == 'mail_client'
assert json_resp['api_token']
assert json_resp['generated_by'] == self.user.username
assert json_resp['generated_at']
assert json_resp['last_access']
assert json_resp['permission'] == 'rw'
def test_can_not_create_by_not_owner(self):
self.logout()
self.login_as(self.admin)
resp = self.client.post(self.api_tokens_url, {'app_name': 'mail_client', 'permission': 'rw'})
self.assertEqual(403, resp.status_code)
def test_can_create_by_group_admin(self):
repo = seafile_api.get_repo(self.create_repo(
name='group-repo', desc='', username=self.user.username, passwd=None))
workspace = Workspaces.objects.create_workspace(
str(self.group_id) + '@seafile_group', repo.id)
# create dtable
self.dtable_url = reverse('api-v2.1-dtables')
resp = self.client.post(self.dtable_url, {'name': 'table11', 'owner': str(self.group_id) + '@seafile_group'})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp["table"]["name"] == 'table11'
# main
self.logout()
self.add_admin_to_group()
ccnet_api.group_set_admin(self.group_id, self.admin.username)
assert is_group_admin_or_owner(self.group_id, self.admin.username)
self.login_as(self.admin)
api_tokens_url = reverse('api-v2.1-dtable-api-tokens', args=[workspace.id, 'table11'])
resp = self.client.post(api_tokens_url, {'app_name': 'mail_client', 'permission': 'rw'})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['api_token']
assert json_resp['app_name'] == 'mail_client'
assert json_resp['api_token']
assert json_resp['generated_by'] == self.admin.username
assert json_resp['generated_at']
assert json_resp['last_access']
assert json_resp['permission'] == 'rw'
Workspaces.objects.delete_workspace(workspace.id)
self.remove_repo(repo.id)
def test_can_not_create_by_group_member(self):
repo = seafile_api.get_repo(self.create_repo(
name='group-repo', desc='', username=self.user.username, passwd=None))
workspace = Workspaces.objects.create_workspace(
str(self.group_id) + '@seafile_group', repo.id)
# create dtable
self.dtable_url = reverse('api-v2.1-dtables')
resp = self.client.post(self.dtable_url, {'name': 'table11', 'owner': str(self.group_id) + '@seafile_group'})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp["table"]["name"] == 'table11'
# main
self.logout()
self.add_admin_to_group()
self.login_as(self.admin)
api_tokens_url = reverse('api-v2.1-dtable-api-tokens', args=[workspace.id, 'table11'])
resp = self.client.post(api_tokens_url, {'app_name': 'mail_client', 'permission': 'rw'})
self.assertEqual(403, resp.status_code)
Workspaces.objects.delete_workspace(workspace.id)
self.remove_repo(repo.id)
def test_can_list(self):
# create
resp = self.client.post(self.api_tokens_url, {'app_name': 'mail_client', 'permission': 'rw'})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['api_token']
assert json_resp['app_name'] == 'mail_client'
assert json_resp['api_token']
assert json_resp['generated_by'] == self.user.username
assert json_resp['generated_at']
assert json_resp['last_access']
assert json_resp['permission'] == 'rw'
# list
resp = self.client.get(self.api_tokens_url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['api_tokens']
assert json_resp['api_tokens'][0]
assert json_resp['api_tokens'][0]['app_name'] == 'mail_client'
assert json_resp['api_tokens'][0]['api_token']
assert json_resp['api_tokens'][0]['generated_by'] == self.user.username
assert json_resp['api_tokens'][0]['generated_at']
assert json_resp['api_tokens'][0]['last_access']
assert json_resp['api_tokens'][0]['permission'] == 'rw'
class DTableAPITokenTest(BaseTestCase):
def setUp(self):
# create workspace
self.workspace = Workspaces.objects.create_workspace(
self.user.username,
self.repo.id
)
self.group_id = self.group.id
self.login_as(self.user)
# create dtable
self.dtable_url = reverse('api-v2.1-dtables')
resp = self.client.post(self.dtable_url, {'name': 'table8', 'owner': self.user.username})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp["table"]["name"] == 'table8'
self.api_tokens_url = reverse('api-v2.1-dtable-api-tokens', args=[self.workspace.id, 'table8'])
# create api token
resp = self.client.post(self.api_tokens_url, {'app_name': 'mail_client', 'permission': 'rw'})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['api_token']
assert json_resp['app_name'] == 'mail_client'
assert json_resp['api_token']
assert json_resp['generated_by'] == self.user.username
assert json_resp['generated_at']
assert json_resp['last_access']
assert json_resp['permission'] == 'rw'
self.api_token = json_resp['api_token']
self.app_name = json_resp['app_name']
# url
self.api_token_url = reverse('api-v2.1-dtable-api-token', args=[self.workspace.id, 'table8', self.app_name])
def tearDown(self):
assert len(Workspaces.objects.all()) == 1
workspace = Workspaces.objects.get_workspace_by_owner(self.user.username)
workspace_id = workspace.id
Workspaces.objects.delete_workspace(workspace_id)
self.remove_repo()
def test_can_delete(self):
resp = self.client.delete(self.api_token_url)
self.assertEqual(200, resp.status_code)
def test_can_not_delete_by_not_owner(self):
self.logout()
self.login_as(self.admin)
resp = self.client.delete(self.api_token_url)
self.assertEqual(403, resp.status_code)
def test_can_put(self):
data = 'permission=r'
resp = self.client.put(self.api_token_url, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
def test_can_not_put_by_not_owner(self):
self.logout()
self.login_as(self.admin)
data = 'permission=r'
resp = self.client.put(self.api_token_url, data, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
def test_can_not_put_by_exist_permission(self):
data = 'permission=rw'
resp = self.client.put(self.api_token_url, data, 'application/x-www-form-urlencoded')
self.assertEqual(400, resp.status_code)
def test_can_not_put_by_invalid_permission(self):
data = 'permission=z'
resp = self.client.put(self.api_token_url, data, 'application/x-www-form-urlencoded')
self.assertEqual(400, resp.status_code)
class DTableAppAccessTokenTest(BaseTestCase):
def setUp(self):
# create workspace
self.workspace = Workspaces.objects.create_workspace(
self.user.username,
self.repo.id
)
self.group_id = self.group.id
self.login_as(self.user)
# create dtable
self.dtable_url = reverse('api-v2.1-dtables')
resp = self.client.post(self.dtable_url, {'name': 'table9', 'owner': self.user.username})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp["table"]["name"] == 'table9'
self.api_tokens_url = reverse('api-v2.1-dtable-api-tokens', args=[self.workspace.id, 'table9'])
# create api token
resp = self.client.post(self.api_tokens_url, {'app_name': 'mail_client', 'permission': 'rw'})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['api_token']
assert json_resp['app_name'] == 'mail_client'
assert json_resp['api_token']
assert json_resp['generated_by'] == self.user.username
assert json_resp['generated_at']
assert json_resp['last_access']
assert json_resp['permission'] == 'rw'
self.api_token = json_resp['api_token']
# url
self.app_access_token_url = reverse(
'api-v2.1-dtable-app-access-token', args=[self.workspace.id, 'table9'])
def tearDown(self):
assert len(Workspaces.objects.all()) == 1
workspace = Workspaces.objects.get_workspace_by_owner(self.user.username)
workspace_id = workspace.id
Workspaces.objects.delete_workspace(workspace_id)
self.remove_repo()
def test_can_get_app_access_token_by_api_token(self):
self.logout()
headers = {'HTTP_AUTHORIZATION': 'Token ' + str(self.api_token)}
resp = self.client.get(self.app_access_token_url, **headers)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['access_token']
assert json_resp['dtable_uuid']
def test_can_not_get_app_access_token_by_invalid_api_token(self):
self.logout()
headers = {'HTTP_AUTHORIZATION': 'Token ' + str(self.api_token[:-5] + randstring(5))}
resp = self.client.get(self.app_access_token_url, **headers)
self.assertEqual(404, resp.status_code)
``` |
{
"source": "JimHafner/GovLens",
"score": 3
} |
#### File: management/commands/create_scraper_user.py
```python
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
SCRAPER_USERNAME = "scraper"
class Command(BaseCommand):
help = "Get or create a scraper user with a Django REST Framework token"
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
user, created = User.objects.get_or_create(username=SCRAPER_USERNAME)
user.save()
if created:
self.stdout.write(f"Created new user with username {SCRAPER_USERNAME}")
else:
self.stdout.write(f"User {SCRAPER_USERNAME} already exists.")
token, created = Token.objects.get_or_create(user=user)
self.stdout.write(f"The token for the user {SCRAPER_USERNAME} is {token}")
``` |
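As a hypothetical illustration only (the endpoint URL below is a placeholder, not taken from the GovLens code), the token printed by this command would be sent by the scraper using Django REST Framework's `Token` authorization header:

```python
import requests

# Placeholder values: the real token is printed by `manage.py create_scraper_user`
# and the endpoint depends on how the GovLens API routes are configured.
API_TOKEN = 'paste-token-here'
API_URL = 'http://localhost:8000/api/agencies/'

response = requests.get(
    API_URL,
    headers={'Authorization': f'Token {API_TOKEN}'},  # DRF TokenAuthentication header format
)
response.raise_for_status()
print(response.json())
```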
{
"source": "JimHagan/apm-report",
"score": 2
} |
#### File: JimHagan/apm-report/apm-report.py
```python
import os
import csv
from python_graphql_client import GraphqlClient
FORMATTED_CSV_ORDER = [
'Tribe',
'Squad',
'squad',
'name',
'reporting',
'language',
'Environment',
'Application',
'applicationId',
'Cost-Center',
'feature',
'Location',
'accountid',
'Service',
'trustedAccountId',
'feature',
'account',
'squad',
'Alerting',
'Contact',
'guid',
'accountId',
'permalink',
'has_traces',
'apmSummary_apdexScore',
'apmSummary_errorRate',
'apmSummary_hostCount',
'apmSummary_instanceCount',
'apmSummary_nonWebThroughput',
'apmSummary_Throughput',
'apmSummary_webThroughput',
]
def get_trace_app_index():
# This file was generated by the following query:
# FROM TransactionTrace select count(*) facet applicationIds since 3 days ago limit max
#
with open('traces_by_app.csv', 'r') as f:
app_index = set()
reader = csv.DictReader(f)
for row in reader:
apps = (row['Application Ids']).split(":")
for app in apps:
if app:
app_index.add(int(app))
#print(app_index)
return app_index
def get_apm_metadata():
headers = {}
headers['Api-Key'] = os.getenv('USER_API_KEY')
headers['Content-Type'] = 'application/json'
client = GraphqlClient(endpoint="https://api.newrelic.com/graphql")
client.headers=headers
query = """
{
actor {
entitySearch(queryBuilder: {domain: APM, type: APPLICATION, name: ""}) {
results {
entities {
tags {
key
values
}
guid
name
reporting
permalink
accountId
account {
id
name
}
... on ApmApplicationEntityOutline {
guid
name
apmSummary {
apdexScore
errorRate
hostCount
instanceCount
nonWebResponseTimeAverage
nonWebThroughput
responseTimeAverage
throughput
webResponseTimeAverage
webThroughput
}
applicationId
}
}
}
}
}
}
"""
_result = client.execute(query=query)
return [data for data in _result['data']['actor']['entitySearch']['results']['entities']]
apps_with_traces = get_trace_app_index()
data = get_apm_metadata()
key_set = set()
apm_objects = []
for item in data:
scrubbed = {}
scrubbed['Tribe'] = 'UNKNOWN'
scrubbed['reporting'] = False
scrubbed['accountid'] = item['account']['id']
scrubbed['account'] = item['account']['name']
scrubbed['name'] = item['name']
scrubbed['applicationId'] = item['applicationId']
if item['applicationId'] in apps_with_traces:
scrubbed['has_traces'] = True
else:
scrubbed['has_traces'] = False
if (item['apmSummary']):
scrubbed['apmSummary_apdexScore'] = item['apmSummary']['apdexScore']
scrubbed['apmSummary_errorRate'] = item['apmSummary']['errorRate']
scrubbed['apmSummary_hostCount'] = item['apmSummary']['hostCount']
scrubbed['apmSummary_instanceCount'] = item['apmSummary']['instanceCount']
scrubbed['apmSummary_nonWebThroughput'] = item['apmSummary']['nonWebThroughput']
scrubbed['apmSummary_Throughput'] = item['apmSummary']['throughput']
scrubbed['apmSummary_webThroughput'] = item['apmSummary']['webThroughput']
scrubbed['reporting'] = item['reporting']
scrubbed['permalink'] = item['permalink']
for tag in item['tags']:
scrubbed[tag['key']] = tag['values'][0]
for k in scrubbed.keys():
key_set.add(k)
apm_objects.append(scrubbed)
apm_objects.sort(key = lambda i: (i['Tribe'], i['reporting']))
with open('apm-report.csv', 'w') as f:
w = csv.DictWriter(f, FORMATTED_CSV_ORDER, extrasaction='ignore')
w.writeheader()
w.writerows(apm_objects)
``` |
{
"source": "jim-hart/article-rate-limit-snippets",
"score": 3
} |
#### File: jim-hart/article-rate-limit-snippets/class_decorator.py
```python
import functools
import threading
import time
import timeit
timer = timeit.default_timer
class throttle:
def __init__(self, limit):
if callable(limit):
self.wrapped = self._wrap(limit)
self.limit = -1
else:
self.wrapped = None
self.limit = 1 / limit
self.lock = threading.Lock()
self.last = -1
def __call__(self, *args, **kwargs):
if self.wrapped is None:
self.wrapped = self._wrap(args[0])
return self
else:
return self._wait(*args, **kwargs)
def __get__(self, instance, owner):
return functools.partial(self, instance)
def _wait(self, *args, **kwargs):
with self.lock:
elapsed = timer() - self.last
if elapsed < self.limit:
print(f'\twaiting {self.limit - elapsed:.3f}s')
time.sleep(self.limit - elapsed)
else:
print(f'\tthrottle not required')
self.last = timer()
return self.wrapped(*args, **kwargs)
def _wrap(self, func):
functools.update_wrapper(self, func)
return func
```
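The `example.py` below drives a different variant of the decorator (`class_decorator_with_inheritance`, which is not shown here). Purely as an illustrative sketch, the `throttle` class defined in this file can be used the same way; the function names and URLs below are made up.

```python
import threading

from class_decorator import throttle  # the module defined above


@throttle(limit=4)  # at most ~4 calls per second across all threads
def fetch(url):
    print(f'fetching {url}')


@throttle  # bare form: limit stays -1, so throttling is effectively disabled
def fetch_unthrottled(url):
    print(f'fetching {url}')


if __name__ == '__main__':
    threads = [threading.Thread(target=fetch, args=(f'https://example.com/{i}',))
               for i in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    fetch_unthrottled('https://example.com/fast')
```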
#### File: jim-hart/article-rate-limit-snippets/example.py
```python
import threading
import time
from class_decorator_with_inheritance import throttle
class Example:
@throttle(limit=2)
def do_work(self):
pass
@throttle(limit=5)
def do_work():
pass
@throttle # disabled
def do_fast_work():
pass
def run_example(label, target, times, limit):
print(f'--- running: {label}, limit: {limit}/s ---')
time.sleep(1)
threads = []
for _ in range(times):
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print()
if __name__ == '__main__':
example = Example()
run_example('method@do_work', target=example.do_work, times=5, limit=2)
run_example('function@do_work', target=do_work, times=5, limit=5)
run_example(
'function@do_fast_work', target=do_fast_work, times=5, limit='inf')
``` |
{
"source": "jim-hart/steam-id-discord-bot",
"score": 2
} |
#### File: steam-id-discord-bot/steam_id_discord_bot/__main__.py
```python
from decouple import config
from discord.ext import commands
from join_code import JoinCode
def main():
bot = commands.Bot(command_prefix='!')
bot.add_cog(JoinCode(bot))
bot.run(config('CLIENT_TOKEN'))
if __name__ == '__main__':
main()
``` |
{
"source": "JimHaughwout/datetimestats",
"score": 3
} |
#### File: datetimestats/datetimestats/datetimestats.py
```python
import datetime as dt
import pytz
_OPER_ERR_MSG = """unsupported operand.
datetimestat requires Iterable of datetime objects (all naive or all with tz). You passed:
"""
_LEN_ERR_MSG = """zero-length operand
datetimestat requires Iterable of length greater than zero. You passed an empty iterable.
"""
_VAL_ERR_MSG = """unsupported value.
datetimestat requires Iterable of datetime objects. Iterable contained:
"""
def validate_dt(candidate):
"""
.. py:function:: validate_dt(candidate)
If candidate is a datetime object, return it. Otherwise raise a TypeError.
:param object candidate: object to validate
:return: candidate (if validated)
:rtype: datetime.datetime
:raises TypeError: if object is not of type datetime.datetime
"""
if not isinstance(candidate, dt.datetime):
raise TypeError(_VAL_ERR_MSG + str(candidate))
else:
return candidate
def mean(dt_list):
"""
.. py:function:: mean(dt_list)
Returns the mean datetime from an Iterable collection of datetime objects.
    Collection can be all naive datetime objects or all datetime objects with tz
(if non-naive datetimes are provided, result will be cast to UTC).
However, collection cannot be a mix of naive and non-naive datetimes.
Can handle micro-second level datetime differences. Can handle Collection of
datetime objects with different timezones. Works with lists or pandas.Series.
:param collection.Iterable dt_list: Iterable list or Series of datetime objects
:return: mean datetime
:rtype: datetime.datetime
:raises TypeError: if operand is not type Iterable or
if operand contains naive and non-naive datetime objects or
if result is not type datetime.datetime
"""
try:
list_size = len(dt_list)
except TypeError:
raise TypeError(_OPER_ERR_MSG + str(dt_list))
if list_size == 1:
mean_dt = dt_list[0]
elif (list_size == 2) and (dt_list[0] == dt_list[1]):
mean_dt = dt_list[0]
else:
try:
if dt_list[0].tzinfo:
base_dt = dt.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
else:
base_dt = dt.datetime(1970, 1, 1)
delta_total = 0
for item in dt_list:
delta_total += (item - base_dt).total_seconds()
delta = delta_total / float(list_size)
mean_dt = base_dt + dt.timedelta(seconds=delta)
except TypeError:
raise TypeError(_OPER_ERR_MSG + str(dt_list))
except IndexError:
raise IndexError(_LEN_ERR_MSG)
return validate_dt(mean_dt)
def median(dt_list):
"""
.. py:function:: median(dt_list)
Returns the median datetime from an Iterable collection of datetime objects.
    Collection can be all naive datetime objects or all datetime objects with tz.
If non-naive datetimes are provided and list size is odd, result will be
middle-most datetime (with whatever time zone is provided). If list size is
even, result will be datetimestats.mean of two middle-most values (in UTC).
However, collection cannot be a mix of naive and non-naive datetimes.
Includes short-circuiting steps to speed computations on small collections.
If Collection has even number of elements it will return the mean of inner
two middle values.
Can handle micro-second level datetime differences. Can handle Collection of
datetime objects with different timezones. Works with lists or pandas.Series.
:param collection.Iterable dt_list: Iterable list or Series of datetime objects
:return: median datetime
:rtype: datetime.datetime
:raises TypeError: if operand is not type Iterable or
if operand contains naive and non-naive datetime objects or
if result is not type datetime.datetime
"""
try:
sorted_dt_list = sorted(dt_list)
list_size = len(sorted_dt_list)
except TypeError:
raise TypeError(_OPER_ERR_MSG + str(dt_list))
if list_size == 0:
raise IndexError(_LEN_ERR_MSG)
elif list_size == 1:
median_dt = sorted_dt_list[0]
elif list_size == 2:
median_dt = mean(sorted_dt_list)
elif list_size % 2:
middle = list_size >> 1
median_dt = sorted_dt_list[middle]
else:
upper = list_size >> 1
lower = upper - 1
middle = [sorted_dt_list[lower], sorted_dt_list[upper]]
median_dt = mean(middle)
return validate_dt(median_dt)
def min(dt_list):
"""
.. py:function:: min(dt_list)
Returns the earliest datetime from an Iterable collection of datetime objects.
    Collection can be all naive datetime objects or all datetime objects with tz.
However, collection cannot be a mix of naive and non-naive datetimes.
Can handle micro-second level datetime differences. Can handle Collection of
datetime objects with different timezones. Works with lists or pandas.Series.
:param collection.Iterable dt_list: Iterable list or Series of datetime objects
:return: min datetime
:rtype: datetime.datetime
:raises TypeError: if operand is not type Iterable or
if operand contains naive and non-naive datetime objects or
if result is not type datetime.datetime
"""
try:
sorted_dt_list = sorted(dt_list)
return validate_dt(sorted_dt_list[0])
except TypeError:
raise TypeError(_OPER_ERR_MSG + str(dt_list))
def max(dt_list):
"""
.. py:function:: max(dt_list)
Returns the latest datetime from an Iterable collection of datetime objects.
    Collection can be all naive datetime objects or all datetime objects with tz.
However, collection cannot be a mix of naive and non-naive datetimes.
Can handle micro-second level datetime differences. Can handle Collection of
datetime objects with different timezones. Works with lists or pandas.Series.
:param collection.Iterable dt_list: Iterable list or Series of datetime objects
    :return: max datetime
:rtype: datetime.datetime
:raises TypeError: if operand is not type Iterable or
if operand contains naive and non-naive datetime objects or
if result is not type datetime.datetime
"""
try:
sorted_dt_list = sorted(dt_list)
return validate_dt(sorted_dt_list[-1])
except TypeError:
raise TypeError(_OPER_ERR_MSG + str(dt_list))
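# Minimal usage sketch (illustrative only; the sample datetimes below are made up):
if __name__ == "__main__":
    samples = [
        dt.datetime(2020, 1, 1),
        dt.datetime(2020, 1, 2),
        dt.datetime(2020, 1, 4),
    ]
    print(mean(samples))    # 2020-01-02 08:00:00 (arithmetic mean of the offsets)
    print(median(samples))  # 2020-01-02 00:00:00 (middle value of the sorted list)
    print(min(samples))     # earliest value
    print(max(samples))     # latest value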
``` |
{
"source": "JimHaughwout/geo_utils",
"score": 3
} |
#### File: JimHaughwout/geo_utils/geocode_addresses.py
```python
from sys import argv, exit, exc_info
from pygeocoder import Geocoder
from time import sleep
from csv import DictReader, DictWriter
import getopt
# INPUT SETTINGS - Infile Attributes
# Set these to the requesite column names used in your input CSV
# ST_NUM_KEY, POSTAL_KEY and COUNTRY_KEY are optional
ST_NUM_KEY = None
STREET_KEY = 'stp_address'
CITY_KEY = 'cty_name'
STATE_KEY = 'stp_state'
POSTAL_KEY = 'stp_zipcode'
COUNTRY_KEY = None
def geocode_address(address="77 Massachusetts Avenue, Cambridge, MA"):
"""
Geocode an address query
:param string address: the address you which to encode
Result is class GeocoderResult with following useful properties:
lat, lng = result.coordinates
latitude = result.latitude
longitude = result.longitude
street_number = result.street_number
street = result.route
city/locality = result.city
county = result.county
neighborhood = result.neighborhood
state = result.state
province = result.province
postal_code = result.postal_code
country = result.country
formatted_address = result.formatted_address
valid_address is TRUE or FALSE
:returns GeocoderResult result: Resulting pygeocoder object
"""
    assert isinstance(address, str), "geocode_address TypeError: Did not pass a str: %s" % address
try:
result = Geocoder.geocode(address)
except: #Catch all extraneous exceptions and exit
e = exc_info()[1]
print "Geocoder %s for %s" % (e, address)
result = None
return result
def process_address(addr):
"""
Processes an address dictionary:
- Extracts address for geocoding based on INPUT SETTINGS
- Geocodes it
- Append the resulting geocoding features to the dict
:param dict addr: address dictionary from CSV import
:returns dict addr: original address dict appended with geocode features
:returns boolean geocodeable: True=Geocodable address, False=Not
"""
assert isinstance(addr, dict), "process_address TypeError: Did not pass a valid dict: %s" % addr
result = geocode_address(extract_address(addr))
if result == None: geocodeable = False
else: geocodeable = result.valid_address
addr['geocodable'] = geocodeable
addr['g_latitude'] = result.latitude if geocodeable else None
addr['g_longitude'] = result.longitude if geocodeable else None
addr['g_street_num'] = result.street_number if geocodeable else None
addr['g_street'] = result.route if geocodeable else None
addr['g_city'] = result.city if geocodeable else None
addr['g_county'] = result.county if geocodeable else None
addr['g_neighborhood'] = result.neighborhood if geocodeable else None
addr['g_state'] = result.state if geocodeable else None
addr['g_province'] = result.province if geocodeable else None
addr['g_postal_code'] = result.postal_code if geocodeable else None
addr['g_country'] = result.country if geocodeable else None
addr['g_formatted_address'] = result.formatted_address if geocodeable else None
return addr, geocodeable
def extract_address(csv_row):
"""
Build addres string for geocoding from CSV dict keys
Exits with error if required fields are not present.
:param dict csv_row: Imported CSV row as a dict (via csv.DictReader)
:returns dict address: Resulting extracted 1-line address for CSV row
"""
assert isinstance(csv_row, dict), "extract_address TypeError: Did not pass a valid dict: %s" % csv_row
try:
address = ""
if ST_NUM_KEY: address += "%s " % csv_row[ST_NUM_KEY]
address += "%s, %s, %s" % (csv_row[STREET_KEY], csv_row[CITY_KEY], csv_row[STATE_KEY])
if POSTAL_KEY: address += " %s" % csv_row[POSTAL_KEY]
if COUNTRY_KEY: address += ", %s" % csv_row[COUNTRY_KEY]
except:
e = exc_info()[1]
exit("build_address Error: %s\nCould not build address from %s" % (e, csv_row))
return address
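# Illustrative example (hypothetical CSV row, not taken from a real input file):
# with the INPUT SETTINGS above, a row like
#   {'stp_address': '77 Massachusetts Ave', 'cty_name': 'Cambridge',
#    'stp_state': 'MA', 'stp_zipcode': '02139'}
# is assembled into the query string "77 Massachusetts Ave, Cambridge, MA 02139"
# (ST_NUM_KEY and COUNTRY_KEY are None, so those parts are omitted).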
def print_usage_and_exit(msg=None):
"""
Pretty print exit on error
:param str msg: Message to append to show user.
"""
print "\nUsage: python %s [-s][-h]" % argv[0]
print "\t-h Print usage help"
print "\t-s Source CSV file - Required"
if msg: print msg + "\n"
exit(1)
def parse_opts(argv):
"""
Parse opts, ensure we have required opts to determine mode, source, target.
Checks if source file is a .csv. Generated outfile based on infile name.
:param list argv: Arguments passed on Python invocation
:returns str infile: CSV input filename
:returns str outfile: Generated CSV output filename
"""
infile = None
try:
opts, args = getopt.getopt(argv, "hs:")
if not opts:
print_usage_and_exit('No options supplied')
except getopt.GetoptError as e:
print_usage_and_exit('Could not parse options: %s' % e)
for opt, arg in opts:
if opt == '-h':
print_usage_and_exit()
elif opt == '-s':
infile = arg
if not(infile):
print_usage_and_exit('-s source_file not specified')
elif infile[-4:] != '.csv':
print_usage_and_exit('source_file is not a .csv file')
else:
outfile = infile[:-4] + '_geocoded.csv'
return infile, outfile
def main(argv):
"""
Main method. Reads in file based on INPUT SETTINGS
Geocodes addresses and adds geocoder attributes to output
Write alls to target csv and prints summary.
"""
infile, outfile = parse_opts(argv)
# Build a list of processed addresses
processed_list = list()
address_count = 0
geocodeable_count = 0
try:
with open(infile) as csv_infile:
reader = DictReader(csv_infile)
print "Importing from %s" % infile
for csv_row in reader:
sleep(0.2) # So we do not exceed 10 / second API limit
print '.',
addr_result, geocodeable = process_address(csv_row)
processed_list.append(addr_result)
address_count += 1
if geocodeable: geocodeable_count += 1
except IOError as e:
print_usage_and_exit(("\nCould not open %r to read: %s" % (infile, e)))
# Write results to file (original plus new geocoder attriutes)
try:
with open(outfile, 'wb') as csv_outfile:
target = DictWriter(csv_outfile, processed_list[0].keys())
target.writeheader()
target.writerows(processed_list)
except IOError as e:
print_usage_and_exit(("\nCould not open %r to write: %s" % (outfile, e)))
geocode_rate = float(geocodeable_count) / float(address_count) * 100.0
print "\nImported %d records. %d (%.2f %%) were geocodeable." % (address_count,
geocodeable_count, geocode_rate)
print "Wrote results to %s." % outfile
if __name__ == '__main__':
main(argv[1:])
``` |
{
"source": "JimHaughwout/roll_stats",
"score": 4
} |
#### File: JimHaughwout/roll_stats/roll_it.py
```python
from random import randint
from itertools import repeat
"""
Implementing parts of
https://www.geeknative.com/61483/12-different-ways-roll-dnd-character/
"""
def best_rolls(num_rolls, top=3, discard_ones=False, base=6, advantage=True):
"""
Returns sum of top number of specified roles for a given base
"""
rolls = list()
for _ in repeat(None, num_rolls):
if discard_ones:
rolls.append(randint(2, base))
else:
rolls.append(randint(1, base))
rolls.sort(reverse=advantage)
return sum(rolls[:top])
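# Illustrative example (made-up dice values): best_rolls(4, top=3) rolls four d6 and
# sums the best three, e.g. raw rolls [6, 4, 3, 1] -> 6 + 4 + 3 = 13.
# With discard_ones=True each die is rolled with randint(2, base), so 1s never occur,
# and advantage=False would keep the lowest `top` dice instead of the highest.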
def three_d6(count=6):
"""
Roll 3d6 and assign the totals to your attributes in any order you want
"""
stats = list()
for _ in repeat(None, count):
roll = best_rolls(num_rolls=3, top=3, discard_ones=False)
stats.append(roll)
stats.sort(reverse=True)
return stats
def four_d6_drop_worst(count=6):
"""
Roll 4d6, drop the lowest in each roll
"""
stats = list()
for _ in repeat(None, count):
roll = best_rolls(num_rolls=4, top=3, discard_ones=False)
stats.append(roll)
stats.sort(reverse=True)
return stats
def five_d6_drop_worst(count=6):
"""
    Roll 5d6 and keep the best three dice in each roll
"""
stats = list()
for _ in repeat(None, count):
roll = best_rolls(num_rolls=5, top=3, discard_ones=False)
stats.append(roll)
stats.sort(reverse=True)
return stats
def four_d6_drop_worst_best_of_n(tries= 8, top=6):
"""
Roll 4d6, drop the lowest die in each roll and do it 8 times.
Assign the best 6 totals to your attributes
"""
stats = list()
for _ in repeat(None, tries):
roll = best_rolls(num_rolls=4, top=3, discard_ones=False)
stats.append(roll)
stats.sort(reverse=True)
return stats[:top]
def four_d6_reroll_below_n(count=6, min_stat=8):
"""
Roll 4d6, drop the lowest die and
re-roll any total that is below 8.
"""
stats = list()
for _ in repeat(None, count):
roll = min_stat - 1
while (roll < min_stat):
roll = best_rolls(num_rolls=4, top=3, discard_ones=False)
stats.append(roll)
stats.sort(reverse=True)
return stats
def four_d6_replace_below_n(count=6, min_stat=8):
"""
    Roll 4d6, drop the lowest die and
    replace any total below min_stat (default 8) with min_stat.
"""
stats = list()
for _ in repeat(None, count):
roll = best_rolls(num_rolls=4, top=3, discard_ones=False)
stats.append(max(roll, min_stat))
stats.sort(reverse=True)
return stats
def reroll_if_no_stat_above_x(x=15):
"""
Roll 4d6, drop the lowest die
but reroll the entire collection if no total is above 15.
"""
max_stat = 0
while (max_stat < x):
stats = four_d6_drop_worst()
max_stat = max(stats)
return stats
def reroll_if_total_below_x(x=70):
"""
Roll 4d6, drop the lowest die and reroll the lowest total
until the cumulative total value is over 70 (or 75, etc)
"""
total = 0
while (total < x):
stats = four_d6_drop_worst()
total = sum(stats)
return stats
def four_d6_drop_worst_reroll_1s(count=6):
"""
Roll 4d6, reroll 1s and drop the lowest die
"""
stats = list()
for _ in repeat(None, count):
roll = best_rolls(num_rolls=4, top=3, discard_ones=True)
stats.append(roll)
stats.sort(reverse=True)
return stats
def four_d6_drop_worst_only_one_above_x(x=16, count=6):
"""
    Roll 4d6s, drop the lowest and assign as required,
    however exactly one stat must end up above x (16 by default).
Note: This could iterate many times, but should be fast enough for humans
"""
stats = list()
count_above_max = 0
while(count_above_max != 1):
stats = four_d6_drop_worst()
count_above_max = sum(i > x for i in stats)
return stats
def roll_2d6_plus6(count=6):
"""
Roll 2d6+6 and assign as required
"""
stats = list()
for _ in repeat(None, count):
stat = best_rolls(num_rolls=2, top=2, discard_ones=False, base=6) + 6
stats.append(stat)
stats.sort(reverse=True)
return stats
## Try and print all techniques
print "3d6: \t\t\t\t", three_d6()
print "4d6: \t\t\t\t", four_d6_drop_worst()
print "5d6: \t\t\t\t", five_d6_drop_worst()
print "2d6+6\t\t\t\t", roll_2d6_plus6()
print "4d6 (6 of 8):\t\t\t", four_d6_drop_worst_best_of_n()
print "4d6, reroll below 8:\t\t", four_d6_reroll_below_n()
print "4d6, never below 8:\t\t", four_d6_replace_below_n()
print "4d6, reroll if none > 15:\t", reroll_if_no_stat_above_x()
print "4d6, reroll if total < 70:\t", reroll_if_total_below_x()
print "4d6, reroll 1s:\t\t\t", four_d6_drop_worst_reroll_1s()
print "4d6, only one can be > 16:\t", four_d6_drop_worst_only_one_above_x()
``` |
{
"source": "JimHaughwout/utils",
"score": 4
} |
#### File: JimHaughwout/utils/csv_io.py
```python
import csv
import datetime
from sys import exit
"""
CSV I/O. Reads in CSV, converts rows and writes out
Uses a generator pattern for memory efficiency
"""
INFILE = 'foo.csv'
OUTFILE = 'bar.csv'
def isodate_it(x):
"""
Excel dumps all date times to CSV in crappy m/d/y H:M format.
This parses this Excel format into an ISO-8601 datetime in UTC
"""
try:
dt, ts = x.split()
m, d, y = dt.split('/')
H, M = ts.split(':')
x_prime = "%s-%s-%s %s:%s:00.000" % (y.zfill(2), m.zfill(2), d.zfill(2), H.zfill(2), M.zfill(2))
y = datetime.datetime.strptime(x_prime, "%y-%m-%d %H:%M:%S.%f").strftime("%Y-%m-%dT%H:%M:%S.000Z")
return y
    except (ValueError, AttributeError):
        exit("Could not convert to ISO date:\n%r\n\n" % x)
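# Illustrative example (assumes a two-digit-year Excel export):
#   isodate_it("6/7/21 9:05") -> "2021-06-07T09:05:00.000Z"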
def convert(row):
    """Convert the 'ts' column of a row to an ISO-8601 UTC timestamp string."""
    row['ts'] = isodate_it(row['ts'])
    return row
row_cnt = 1
with open(OUTFILE, 'wb') as target:
writer = csv.writer(target)
with open(INFILE, 'r') as source:
data = csv.DictReader(source)
        for row in data:
            row = convert(row)
            if row_cnt > 1:
                writer.writerow(row.values())
            else:
                writer.writerow(row.keys())
                writer.writerow(row.values())
            row_cnt += 1
``` |
{
"source": "jimhester/arrow",
"score": 3
} |
#### File: pyarrow/tests/pandas_examples.py
```python
from collections import OrderedDict
import numpy as np
import pandas as pd
import pyarrow as pa
def dataframe_with_arrays():
"""
    Dataframe with numpy array columns of every possible primitive type.
Returns
-------
df: pandas.DataFrame
schema: pyarrow.Schema
Arrow schema definition that is in line with the constructed df.
"""
dtypes = [('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('f4', pa.float32()), ('f8', pa.float64())]
arrays = OrderedDict()
fields = []
for dtype, arrow_dtype in dtypes:
fields.append(pa.field(dtype, pa.list_(arrow_dtype)))
arrays[dtype] = [
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
]
fields.append(pa.field('str', pa.list_(pa.string())))
arrays['str'] = [
np.array([u"1", u"ä"], dtype="object"),
None,
np.array([u"1"], dtype="object"),
np.array([u"1", u"2", u"3"], dtype="object")
]
fields.append(pa.field('datetime64', pa.list_(pa.timestamp('ms'))))
arrays['datetime64'] = [
np.array(['2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
None,
None,
np.array(['2007-07-13T02',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
]
df = pd.DataFrame(arrays)
schema = pa.Schema.from_fields(fields)
return df, schema
def dataframe_with_lists():
"""
    Dataframe with list columns of every possible primitive type.
Returns
-------
df: pandas.DataFrame
schema: pyarrow.Schema
Arrow schema definition that is in line with the constructed df.
"""
arrays = OrderedDict()
fields = []
fields.append(pa.field('int64', pa.list_(pa.int64())))
arrays['int64'] = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, 1, 2, 3, 4],
None,
[0]
]
fields.append(pa.field('double', pa.list_(pa.float64())))
arrays['double'] = [
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.],
None,
[0.]
]
fields.append(pa.field('str_list', pa.list_(pa.string())))
arrays['str_list'] = [
[u"1", u"ä"],
None,
[u"1"],
[u"1", u"2", u"3"]
]
df = pd.DataFrame(arrays)
schema = pa.Schema.from_fields(fields)
return df, schema
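# Usage sketch (illustrative only; relies solely on the helpers defined above):
#   df, schema = dataframe_with_lists()
#   print(schema)   # prints the three list<...> fields declared above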
```
#### File: pyarrow/tests/test_array.py
```python
import pytest
import sys
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pyarrow as pa
import pyarrow.formatting as fmt
def test_total_bytes_allocated():
assert pa.total_allocated_bytes() == 0
def test_repr_on_pre_init_array():
arr = pa.Array()
assert len(repr(arr)) > 0
def test_getitem_NA():
arr = pa.from_pylist([1, None, 2])
assert arr[1] is pa.NA
def test_list_format():
arr = pa.from_pylist([[1], None, [2, 3, None]])
result = fmt.array_format(arr)
expected = """\
[
[1],
NA,
[2,
3,
NA]
]"""
assert result == expected
def test_string_format():
arr = pa.from_pylist(['', None, 'foo'])
result = fmt.array_format(arr)
expected = """\
[
'',
NA,
'foo'
]"""
assert result == expected
def test_long_array_format():
arr = pa.from_pylist(range(100))
result = fmt.array_format(arr, window=2)
expected = """\
[
0,
1,
...
98,
99
]"""
assert result == expected
def test_to_pandas_zero_copy():
import gc
arr = pa.from_pylist(range(10))
for i in range(10):
np_arr = arr.to_pandas()
assert sys.getrefcount(np_arr) == 2
np_arr = None # noqa
assert sys.getrefcount(arr) == 2
for i in range(10):
arr = pa.from_pylist(range(10))
np_arr = arr.to_pandas()
arr = None
gc.collect()
# Ensure base is still valid
# Because of py.test's assert inspection magic, if you put getrefcount
# on the line being examined, it will be 1 higher than you expect
base_refcount = sys.getrefcount(np_arr.base)
assert base_refcount == 2
np_arr.sum()
def test_array_slice():
arr = pa.from_pylist(range(10))
sliced = arr.slice(2)
expected = pa.from_pylist(range(2, 10))
assert sliced.equals(expected)
sliced2 = arr.slice(2, 4)
expected2 = pa.from_pylist(range(2, 6))
assert sliced2.equals(expected2)
# 0 offset
assert arr.slice(0).equals(arr)
# Slice past end of array
assert len(arr.slice(len(arr))) == 0
with pytest.raises(IndexError):
arr.slice(-1)
# Test slice notation
assert arr[2:].equals(arr.slice(2))
assert arr[2:5].equals(arr.slice(2, 3))
assert arr[-5:].equals(arr.slice(len(arr) - 5))
with pytest.raises(IndexError):
arr[::-1]
with pytest.raises(IndexError):
arr[::2]
def test_dictionary_from_numpy():
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
mask = np.array([False, False, True, False, False, False])
d1 = pa.DictionaryArray.from_arrays(indices, dictionary)
d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)
for i in range(len(indices)):
assert d1[i].as_py() == dictionary[indices[i]]
if mask[i]:
assert d2[i] is pa.NA
else:
assert d2[i].as_py() == dictionary[indices[i]]
def test_dictionary_from_boxed_arrays():
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
iarr = pa.Array.from_numpy(indices)
darr = pa.Array.from_numpy(dictionary)
d1 = pa.DictionaryArray.from_arrays(iarr, darr)
for i in range(len(indices)):
assert d1[i].as_py() == dictionary[indices[i]]
def test_dictionary_with_pandas():
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
mask = np.array([False, False, True, False, False, False])
d1 = pa.DictionaryArray.from_arrays(indices, dictionary)
d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)
pandas1 = d1.to_pandas()
ex_pandas1 = pd.Categorical.from_codes(indices, categories=dictionary)
tm.assert_series_equal(pd.Series(pandas1), pd.Series(ex_pandas1))
pandas2 = d2.to_pandas()
ex_pandas2 = pd.Categorical.from_codes(np.where(mask, -1, indices),
categories=dictionary)
tm.assert_series_equal(pd.Series(pandas2), pd.Series(ex_pandas2))
```
#### File: pyarrow/tests/test_schema.py
```python
import pytest
import pyarrow as pa
import numpy as np
# XXX: pyarrow.schema.schema masks the module on imports
sch = pa._schema
def test_type_integers():
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64']
for name in dtypes:
factory = getattr(pa, name)
t = factory()
assert str(t) == name
def test_type_list():
value_type = pa.int32()
list_type = pa.list_(value_type)
assert str(list_type) == 'list<item: int32>'
def test_type_string():
t = pa.string()
assert str(t) == 'string'
def test_type_timestamp_with_tz():
tz = 'America/Los_Angeles'
t = pa.timestamp('ns', tz=tz)
assert t.unit == 'ns'
assert t.tz == tz
def test_type_from_numpy_dtype_timestamps():
cases = [
(np.dtype('datetime64[s]'), pa.timestamp('s')),
(np.dtype('datetime64[ms]'), pa.timestamp('ms')),
(np.dtype('datetime64[us]'), pa.timestamp('us')),
(np.dtype('datetime64[ns]'), pa.timestamp('ns'))
]
for dt, pt in cases:
result = sch.type_from_numpy_dtype(dt)
assert result == pt
def test_field():
t = pa.string()
f = pa.field('foo', t)
assert f.name == 'foo'
assert f.nullable
assert f.type is t
assert repr(f) == "Field('foo', type=string)"
f = pa.field('foo', t, False)
assert not f.nullable
def test_schema():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
sch = pa.schema(fields)
assert len(sch) == 3
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
assert sch.field_by_name('foo').name == 'foo'
assert sch.field_by_name('foo').type == fields[0].type
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>"""
def test_field_empty():
f = pa.Field()
with pytest.raises(ReferenceError):
repr(f)
def test_schema_equals():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
sch1 = pa.schema(fields)
print(dir(sch1))
sch2 = pa.schema(fields)
assert sch1.equals(sch2)
del fields[-1]
sch3 = pa.schema(fields)
assert not sch1.equals(sch3)
``` |
{
"source": "jim-hill-r/BlueSteel",
"score": 2
} |
#### File: frontend/deploy/deployConfigure.py
```python
from datetime import datetime
from common import uploadDirectoryToS3
def deploy():
release = datetime.utcnow().isoformat()
print ('Deploying configure release... ' + release)
uploadDirectoryToS3('../src/configure/dist/pwa','eel3-app','configure/' + release,'us-east-2')
print (release + ' successfully deployed.')
if __name__ == '__main__':
deploy()
```
#### File: infra/provision/s3.py
```python
import json
try:
import boto3
except ImportError:
print(f'Error: boto3 is required. Please install.')
print(f'Try: pip install boto3')
def provision(bucket, region):
s3 = boto3.client('s3', region_name=region)
print ('Creating app s3 bucket...')
try:
location = {'LocationConstraint': region}
s3.create_bucket(Bucket=bucket,CreateBucketConfiguration=location)
except:
        print('Exception: bucket probably already exists')
        # TODO: check whether the bucket already exists and only ignore that case
website_configuration = {
'ErrorDocument': {'Key': 'error.html'},
'IndexDocument': {'Suffix': 'index.html'},
}
policy = {
'Version': '2012-10-17',
'Statement': [{
'Sid': 'AddPerm',
'Effect': 'Allow',
'Principal': '*',
'Action': ['s3:GetObject'],
'Resource': f'arn:aws:s3:::{bucket}/*'
}]
}
# Convert the policy from JSON dict to string
policy = json.dumps(policy)
# Set the website configuration
s3 = boto3.client('s3')
s3.put_bucket_website(Bucket=bucket,WebsiteConfiguration=website_configuration)
s3.put_bucket_policy(Bucket=bucket, Policy=policy)
if __name__ == '__main__':
provision('eel3-app','us-east-2')
``` |
{
"source": "jimhoekstra/machine-learning",
"score": 2
} |
#### File: machine_learning_jh/metrics/mse.py
```python
import numpy as np
def mse(y_true, y_pred):
return np.mean((y_true - y_pred)**2)
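# Illustrative example (made-up values):
#   mse(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 4.0])) == 1.0 / 3.0
# squared errors are [0, 0, 1], averaged over the three samples.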
``` |
{
"source": "jimholdaway/pythonista",
"score": 3
} |
#### File: pythonista/extensions/Web Image.py
```python
import appex
import photos
import os
import piexif
from PIL import Image
def main():
"""
Script for iOS share extensions in Photos app to remove EXIF data from image when shared. Original image is preserved. Image with no EXIF is saved as JPEG to camera roll with creation date and time as filename.
Requires piexif. Add piexif to the site_packages_3 folder or use stash to use pip
"""
if not appex.is_running_extension():
print("This extension is designed to work with iOS share functionality, select and share photos from the Photos app")
else:
img = appex.get_image(image_type="pil")
if img:
if "exif" in img.info:
exif_dict = piexif.load(img.info["exif"])
if piexif.ImageIFD.Orientation in exif_dict["0th"]:
orientation = exif_dict["0th"][piexif.ImageIFD.Orientation]
if piexif.ExifIFD.DateTimeOriginal in exif_dict["Exif"]:
date_time = exif_dict["Exif"][piexif.ExifIFD.DateTimeOriginal]
path = date_time.decode("utf-8").replace(' ', '')
path = path.replace(':', '')
path = f"{path}.jpg"
else:
path = "web_image.jpg"
                    print('No EXIF capture date found')
if orientation == 2:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
img = img.rotate(180)
elif orientation == 4:
img = img.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 5:
img = img.rotate(-90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
img = img.rotate(-90, expand=True)
elif orientation == 7:
img = img.rotate(90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
img = img.rotate(90, expand=True)
img.save(path, quality = 95)
photos.create_image_asset(path)
os.remove(path)
else:
print('No input image found')
if __name__ == '__main__':
main()
```
#### File: scripts/jpg_exif_remove/jpg_exif_remove.py
```python
import sys
import argparse
import os
import errno
import piexif
from PIL import Image
def jpg_exif_remove(args):
"""
Removes EXIF data from directory of jpeg images whilst preserving image orientation and quality.
Parameters
----------
in_dir: string, directory containing jpgs to have EXIF removed, default 'images'
out_dir: string, destination directory of cleaned jpgs, default 'images_cleaned'
abs_path: boolean, if True in_dir and out_dir must be full absolute paths
"""
# Set args
in_dir = args.in_dir
out_dir = args.out_dir
abs_path = args.abs_path
# Set paths according to arg boolean
if (abs_path == True):
in_path = in_dir
out_path = out_dir
elif (abs_path == False):
in_path = os.getcwd() + "/" + in_dir + "/"
out_path = os.getcwd() + "/" + out_dir + "/"
else:
print("Option 'abs_path' must be boolean")
# Check if output path exists, create if not
try:
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Loop through files in input path directory
for filename in os.listdir(in_path):
img = Image.open(in_path + filename)
# Check if image has EXIF data
if "exif" in img.info:
exif_dict = piexif.load(img.info["exif"])
# Check if EXIF data has orientation entry
if piexif.ImageIFD.Orientation in exif_dict["0th"]:
orientation = exif_dict["0th"].pop(piexif.ImageIFD.Orientation)
# Rotate according to orientation entry
if orientation == 2:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
img = img.rotate(180)
elif orientation == 4:
img = img.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 5:
img = img.rotate(-90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
img = img.rotate(-90, expand=True)
elif orientation == 7:
img = img.rotate(90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
img = img.rotate(90, expand=True)
# Save image without EXIF, with max useful quality, no subsampling
img.save(out_path + filename, quality = 95, subsampling = 0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Open JPEGs in a directory, removes EXIF data and saves to another directory. If default arguments are not used, ALL arguments must be set.")
parser.add_argument("in_dir", nargs='?', type=str, default="images", help="Path to directory of images to have EXIF removed")
parser.add_argument("out_dir", nargs='?', type=str, default="images_cleaned", help="Path of directory to save EXIF removed images too")
parser.add_argument("abs_path", nargs='?', type=bool, default=False, help="Set to true is absolute path to be used")
args = parser.parse_args()
jpg_exif_remove(args)
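# Illustrative invocations (hypothetical paths):
#   python jpg_exif_remove.py                      # ./images -> ./images_cleaned
#   python jpg_exif_remove.py photos photos_clean  # custom relative directories
# Note: argparse's type=bool treats any non-empty string as True, so only pass a
# value for abs_path when both directories really are absolute paths.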
``` |
{
"source": "Jim-Holmstroem/guildai",
"score": 2
} |
#### File: guild/commands/publish_impl.py
```python
from __future__ import absolute_import
from __future__ import division
import os
from guild import batch_util
from guild import cli
from guild import publish as publishlib
from guild import util
from . import runs_impl
def publish(args, ctx=None):
if args.files and args.all_files:
cli.error("--files and --all-files cannot both be used")
if args.refresh_index:
_refresh_publish_index(args)
else:
_publish(args, ctx)
_report_dir_size(args)
def _publish(args, ctx):
preview = "You are about to publish the following run(s) to %s:" % (
args.dest or publishlib.DEFAULT_DEST_HOME
)
confirm = "Continue?"
no_runs = "No runs to publish."
def publish_f(runs, formatted):
_publish_runs(runs, formatted, args)
_refresh_publish_index(args, no_dest=True)
def select_runs_f(args, ctx, default_runs_arg, force_deleted):
runs = runs_impl.runs_op_selected(args, ctx, default_runs_arg, force_deleted)
return [
run for run in runs if args.include_batch or not batch_util.is_batch(run)
]
runs_impl.runs_op(
args,
ctx,
False,
preview,
confirm,
no_runs,
publish_f,
runs_impl.ALL_RUNS_ARG,
True,
select_runs_f,
)
def _publish_runs(runs, formatted, args):
if args.all_files:
copy_files = publishlib.COPY_ALL_FILES
elif args.files or args.include_links:
copy_files = publishlib.COPY_DEFAULT_FILES
else:
copy_files = None
for run, frun in zip(runs, formatted):
cli.out(
"Publishing [%s] %s... " % (frun["short_id"], frun["operation"]), nl=False
)
frun["_run"] = run
try:
publishlib.publish_run(
run,
dest=args.dest,
template=args.template,
copy_files=copy_files,
include_links=args.include_links,
md5s=not args.no_md5,
formatted_run=frun,
)
except publishlib.PublishError as e:
cli.error("error publishing run %s:\n%s" % (run.id, e))
else:
dest = args.dest or publishlib.DEFAULT_DEST_HOME
size = util.dir_size(os.path.join(dest, run.id))
cli.out("using %s" % util.format_bytes(size))
def _refresh_publish_index(args, no_dest=False):
if no_dest:
dest_suffix = ""
else:
dest_suffix = " in %s" % (args.dest or publishlib.DEFAULT_DEST_HOME)
print("Refreshing runs index%s" % dest_suffix)
publishlib.refresh_index(args.dest)
def _report_dir_size(args):
dest = args.dest or publishlib.DEFAULT_DEST_HOME
size = util.dir_size(dest)
cli.out("Published runs using %s" % util.format_bytes(size))
```
#### File: guild/commands/watch_impl.py
```python
from __future__ import absolute_import
from __future__ import division
import logging
import os
import re
import sys
import time
import psutil
from guild import cli
from guild import run as runlib
from guild import util
from guild import var
from . import remote_impl_support
from . import runs_impl
log = logging.getLogger("guild")
def main(args, ctx):
if args.pid:
_check_non_pid_args(args)
_watch_pid(args)
elif args.remote:
_watch_remote(args)
elif args.run:
run = runs_impl.one_run(args, ctx)
_watch_run(run)
else:
_watch_default_running(args)
def _check_non_pid_args(args):
if args.run or args.ops or args.labels or args.unlabeled:
cli.error("--pid may not be used with other options")
def _watch_pid(args):
run = _run_for_pid_arg(args.pid)
_watch_run(run)
def _run_for_pid_arg(pid):
return util.find_apply(
[
_run_for_job_pidfile,
_run_for_pidfile,
_run_for_pid,
_handle_no_run_for_pid_arg,
],
pid,
)
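# Illustrative inputs resolved by the chain above (hypothetical values):
#   "/some/run/.guild/JOB"  -> run loaded from the run directory containing .guild/JOB
#   "/tmp/operation.pid"    -> pid read from the pidfile, then matched against run pids
#   "12345"                 -> matched directly against run pids (or their parent pids)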
def _run_for_job_pidfile(pid_arg):
m = re.search(r"(.+)/\.guild/JOB$", pid_arg)
if not m:
return None
run_dir = m.group(1)
return runlib.for_dir(run_dir)
def _run_for_pidfile(pid_arg):
pid = _read_pid(pid_arg)
if pid is None:
return None
return _run_for_pid(pid)
def _read_pid(path):
try:
f = open(path, "r")
except IOError as e:
if e.errno != 2:
raise
return None
else:
raw = f.readline().strip()
try:
return int(raw)
except ValueError:
cli.error("pidfile %s does not contain a valid pid" % path)
def _run_for_pid(pid):
pid = _try_int(pid)
if pid is None:
return None
for run_id, run_dir in var.iter_run_dirs():
run = runlib.Run(run_id, run_dir)
if run.pid and (run.pid == pid or _parent_pid(run.pid) == pid):
return run
cli.error("cannot find run for pid %i" % pid)
def _try_int(pid):
try:
return int(pid)
except ValueError:
return None
def _parent_pid(pid):
try:
p = psutil.Process(pid)
except psutil.NoSuchProcess:
return None
else:
return p.parent().pid
def _handle_no_run_for_pid_arg(pid_arg):
# Assume pid_arg is a pidfile path.
cli.error("%s does not exist" % pid_arg)
def _watch_run(run):
try:
_tail(run)
_print_run_status(run)
except KeyboardInterrupt:
_stopped_msg(run)
def _stopped_msg(run):
msg = "\nStopped watching %s" % run.id
if run.pid and psutil.Process(run.pid).is_running():
msg += " (still running)"
cli.out(msg)
def _tail(run):
if os.getenv("NO_WATCHING_MSG") != "1":
cli.out("Watching run %s" % run.id, err=True)
if run.pid is None:
_print_output(run)
return
proc = psutil.Process(run.pid)
output_path = run.guild_path("output")
f = _wait_for_output(proc, output_path)
if not f:
return
with f:
while True:
line = f.readline()
if line:
sys.stdout.write(line)
sys.stdout.flush()
elif proc.is_running():
time.sleep(0.1)
else:
break
def _wait_for_output(proc, output_path):
while proc.is_running():
f = _try_open(output_path)
if f:
return f
time.sleep(1.0)
return _try_open(output_path)
def _print_output(run):
output_path = run.guild_path("output")
f = _try_open(output_path)
if not f:
return
while True:
line = f.readline()
if not line:
break
sys.stdout.write(line)
sys.stdout.flush()
def _try_open(path):
try:
return open(path, "r")
except (IOError, OSError) as e:
if e.errno != 2:
raise
return None
def _print_run_status(run):
cli.out("Run %s stopped with a status of '%s'" % (run.id, run.status), err=True)
def _watch_default_running(args):
args.running = True
runs = runs_impl.filtered_runs(args)
if not runs:
cli.error(
"nothing to watch\n"
"You can view the output of a specific run using "
"'guild watch RUN'."
)
_watch_run(runs[0])
def _watch_remote(args):
try:
remote_impl_support.watch_run(args)
except KeyboardInterrupt:
cli.out("\nStopped watching remote run")
```
#### File: guildai/guild/deps.py
```python
from __future__ import absolute_import
from __future__ import division
import logging
import os
import re
from guild import namespace
from guild import resolver as resolverlib
from guild import resource
from guild import util
log = logging.getLogger("guild")
RESOURCE_TERM = r"[a-zA-Z0-9_\-\.]+"
class DependencyError(Exception):
pass
class ResolutionContext(object):
def __init__(self, target_dir, opdef, resource_config):
self.target_dir = target_dir
self.opdef = opdef
self.resource_config = resource_config
class Resource(object):
def __init__(self, resdef, location, ctx):
self.resdef = resdef
self.location = location
self.ctx = ctx
self.config = self._init_resource_config()
self.dependency = None
def _init_resource_config(self):
for name, config in self.ctx.resource_config.items():
if name in [self.resdef.fullname, self.resdef.name]:
return config
return None
def resolve(self, unpack_dir=None):
resolved_acc = []
for source in self.resdef.sources:
paths = self.resolve_source(source, unpack_dir)
resolved_acc.extend(paths)
return resolved_acc
def resolve_source(self, source, unpack_dir=None):
resolver = resolverlib.for_resdef_source(source, self)
if not resolver:
raise DependencyError(
"unsupported source '%s' in %s resource" % (source, self.resdef.name)
)
try:
source_paths = resolver.resolve(unpack_dir)
except resolverlib.ResolutionError as e:
msg = "could not resolve '%s' in %s resource: %s" % (
source,
self.resdef.name,
e,
)
if source.help:
msg += "\n%s" % source.help
raise DependencyError(msg)
except Exception as e:
log.exception(
"resolving required source '%s' in %s resource",
source,
self.resdef.name,
)
raise DependencyError(
"unexpected error resolving '%s' in %s resource: %r"
% (source, self.resdef.name, e)
)
else:
for path in source_paths:
self._link_to_source(path, source)
return source_paths
def _link_to_source(self, source_path, source):
source_path = util.strip_trailing_sep(source_path)
link = self._link_path(source_path, source)
_symlink(source_path, link)
def _link_path(self, source_path, source):
basename = os.path.basename(source_path)
res_path = self.resdef.path or ""
if source.path:
res_path = os.path.join(res_path, source.path)
if os.path.isabs(res_path):
raise DependencyError(
"invalid path '%s' in %s resource (path must be relative)"
% (res_path, self.resdef.name)
)
if source.rename:
basename = _rename_source(basename, source.rename)
return os.path.join(self.ctx.target_dir, res_path, basename)
def _rename_source(name, rename):
for spec in rename:
try:
renamed = re.sub(spec.pattern, spec.repl, name)
except Exception as e:
raise DependencyError(
"error renaming source %s (%r %r): %s"
% (name, spec.pattern, spec.repl, e)
)
else:
if renamed != name:
return renamed
return name
def _symlink(source_path, link):
assert os.path.isabs(link), link
if os.path.lexists(link) or os.path.exists(link):
log.debug("%s already exists, skipping link", link)
return
util.ensure_dir(os.path.dirname(link))
log.debug("resolving source %s as link %s", source_path, link)
rel_source_path = _rel_source_path(source_path, link)
util.symlink(rel_source_path, link)
def _rel_source_path(source, link):
source_dir, source_name = os.path.split(source)
real_link = util.realpath(link)
link_dir = os.path.dirname(real_link)
source_rel_dir = os.path.relpath(source_dir, link_dir)
return os.path.join(source_rel_dir, source_name)
class ResourceProxy(object):
def __init__(self, dependency, name, config, ctx):
self.dependency = dependency
self.name = name
self.config = config
self.ctx = ctx
def resolve(self):
source_path = self.config # the only type of config supported
if not os.path.exists(source_path):
raise DependencyError(
"could not resolve %s: %s does not exist" % (self.name, source_path)
)
log.info("Using %s for %s resource", source_path, self.name)
basename = os.path.basename(source_path)
link = os.path.join(self.ctx.target_dir, basename)
_symlink(source_path, link)
return [source_path]
def _dep_desc(dep):
return "%s:%s" % (dep.opdef.modeldef.name, dep.opdef.name)
def resolve(dependencies, ctx):
resolved = {}
for res in resources(dependencies, ctx):
log.info("Resolving %s dependency", res.resdef.name)
resolved_sources = res.resolve()
log.debug("resolved sources for %s: %r", res.dependency, resolved_sources)
if not resolved_sources:
log.warning("Nothing resolved for %s dependency", res.resdef.name)
resolved.setdefault(res.resdef.name, []).extend(resolved_sources)
return resolved
def resources(dependencies, ctx):
flag_vals = util.resolve_all_refs(ctx.opdef.flag_values())
return [_dependency_resource(dep, flag_vals, ctx) for dep in dependencies]
def _dependency_resource(dep, flag_vals, ctx):
if dep.inline_resource:
return _inline_resource(dep.inline_resource, ctx)
spec = util.resolve_refs(dep.spec, flag_vals)
try:
res = util.find_apply(
[_model_resource, _guildfile_resource, _packaged_resource], spec, ctx
)
except DependencyError as e:
if spec in ctx.resource_config:
log.warning(str(e))
return ResourceProxy(dep, spec, ctx.resource_config[spec], ctx)
raise
if res:
res.dependency = spec
return res
raise DependencyError(
"invalid dependency '%s' in operation '%s'" % (spec, ctx.opdef.fullname)
)
def _inline_resource(resdef, ctx):
return Resource(resdef, resdef.modeldef.guildfile.dir, ctx)
def _model_resource(spec, ctx):
m = re.match(r"(%s)$" % RESOURCE_TERM, spec)
if m is None:
return None
res_name = m.group(1)
return _modeldef_resource(ctx.opdef.modeldef, res_name, ctx)
def _modeldef_resource(modeldef, res_name, ctx):
resdef = modeldef.get_resource(res_name)
if resdef is None:
raise DependencyError(
"resource '%s' required by operation '%s' is not defined"
% (res_name, ctx.opdef.fullname)
)
return Resource(resdef, modeldef.guildfile.dir, ctx)
def _guildfile_resource(spec, ctx):
m = re.match(r"(%s):(%s)$" % (RESOURCE_TERM, RESOURCE_TERM), spec)
if m is None:
return None
model_name = m.group(1)
modeldef = ctx.opdef.guildfile.models.get(model_name)
if modeldef is None:
raise DependencyError(
"model '%s' in resource '%s' required by operation "
"'%s' is not defined" % (model_name, spec, ctx.opdef.fullname)
)
res_name = m.group(2)
return _modeldef_resource(modeldef, res_name, ctx)
def _packaged_resource(spec, ctx):
m = re.match(r"(%s)/(%s)$" % (RESOURCE_TERM, RESOURCE_TERM), spec)
if m is None:
return None
pkg_name = m.group(1)
res_name = m.group(2)
try:
resources = list(resource.for_name(res_name))
except LookupError:
pass
else:
for res in resources:
if namespace.apply_namespace(res.dist.project_name) == pkg_name:
location = os.path.join(
res.dist.location, res.dist.key.replace(".", os.path.sep)
)
return Resource(res.resdef, location, ctx)
raise DependencyError(
"resource '%s' required by operation '%s' is not installed"
% (spec, ctx.opdef.fullname)
)
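# Illustrative dependency specs and the parser that accepts them (see the regexes above):
#   "data"            -> _model_resource     (resource defined on the current model)
#   "mnist:data"      -> _guildfile_resource (MODEL:RESOURCE within the same guildfile)
#   "gpkg.mnist/data" -> _packaged_resource  (PACKAGE/RESOURCE from an installed package)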
```
#### File: guild/remotes/s3.py
```python
from __future__ import absolute_import
from __future__ import division
import hashlib
import itertools
import logging
import os
import subprocess
import sys
import uuid
from guild import click_util
from guild import log as loglib
from guild import remote as remotelib
from guild import util
from guild import var
from guild.commands import runs_impl
log = logging.getLogger("guild.remotes.s3")
RUNS_PATH = ["runs"]
DELETED_RUNS_PATH = ["trash", "runs"]
class S3Remote(remotelib.Remote):
def __init__(self, name, config):
self.name = name
self.bucket = config["bucket"]
self.root = config.get("root", "/")
self.region = config.get("region")
self.env = _init_env(config.get("env"))
self.local_sync_dir = lsd = self._local_sync_dir()
self._runs_dir = os.path.join(lsd, *RUNS_PATH)
self._deleted_runs_dir = os.path.join(lsd, *DELETED_RUNS_PATH)
def _local_sync_dir(self):
base_dir = var.remote_dir(self.name)
uri_hash = hashlib.md5(self._s3_uri().encode()).hexdigest()
return os.path.join(base_dir, "meta", uri_hash)
def _s3_uri(self, *subpath):
joined_path = _join_path(self.root, *subpath)
return "s3://%s/%s" % (self.bucket, joined_path)
def list_runs(self, verbose=False, **filters):
self._sync_runs_meta()
runs_dir = self._runs_dir_for_filters(**filters)
if not os.path.exists(runs_dir):
return
args = click_util.Args(verbose=verbose, **filters)
args.archive = runs_dir
args.deleted = False
args.remote = None
args.json = False
runs_impl.list_runs(args)
def _runs_dir_for_filters(self, deleted, **_filters):
if deleted:
return self._deleted_runs_dir
else:
return self._runs_dir
def _sync_runs_meta(self, force=False):
log.info(loglib.dim("Synchronizing runs with %s"), self.name)
if not force and self._meta_current():
return
self._clear_local_meta_id()
sync_args = [
self._s3_uri(),
self.local_sync_dir,
"--exclude",
"*",
"--include",
"*/.guild/opref",
"--include",
"*/.guild/attrs/*",
"--include",
"*/.guild/LOCK*",
"--include",
"meta-id",
"--delete",
]
self._s3_cmd("sync", sync_args, to_stderr=True)
def _meta_current(self):
local_id = self._local_meta_id()
if local_id is None:
log.debug("local meta-id not found, meta not current")
return False
remote_id = self._remote_meta_id()
log.debug("local meta-id: %s", local_id)
log.debug("remote meta-id: %s", remote_id)
return local_id == remote_id
def _clear_local_meta_id(self):
id_path = os.path.join(self.local_sync_dir, "meta-id")
util.ensure_deleted(id_path)
def _local_meta_id(self):
id_path = os.path.join(self.local_sync_dir, "meta-id")
return util.try_read(id_path, apply=str.strip)
def _remote_meta_id(self):
with util.TempFile("guild-s3-") as tmp:
args = [
"--bucket",
self.bucket,
"--key",
_join_path(self.root, "meta-id"),
tmp.path,
]
self._s3api_output("get-object", args)
return open(tmp.path, "r").read().strip()
def _s3api_output(self, name, args):
cmd = [_aws_cmd()]
if self.region:
cmd.extend(["--region", self.region])
cmd.extend(["s3api", name] + args)
log.debug("aws cmd: %r", cmd)
try:
return subprocess.check_output(
cmd, env=self._cmd_env(), stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
raise remotelib.RemoteProcessError.for_called_process_error(e)
def _cmd_env(self):
env = dict(os.environ)
if self.env:
env.update(self.env)
return env
def _s3_cmd(self, name, args, to_stderr=False):
cmd = [_aws_cmd()]
if self.region:
cmd.extend(["--region", self.region])
cmd.extend(["s3", name] + args)
log.debug("aws cmd: %r", cmd)
try:
_subprocess_call(cmd, to_stderr, self._cmd_env())
except subprocess.CalledProcessError as e:
raise remotelib.RemoteProcessError.for_called_process_error(e)
def filtered_runs(self, **filters):
self._sync_runs_meta()
args = click_util.Args(**filters)
args.archive = self._runs_dir
args.remote = None
args.runs = []
return runs_impl.runs_for_args(args)
def delete_runs(self, **opts):
self._sync_runs_meta()
args = click_util.Args(**opts)
args.archive = self._runs_dir
if args.permanent:
preview = (
"WARNING: You are about to permanently delete "
"the following runs on %s:" % self.name
)
confirm = "Permanently delete these runs?"
else:
preview = "You are about to delete the following runs on %s:" % self.name
confirm = "Delete these runs?"
no_runs_help = "Nothing to delete."
def delete_f(selected):
self._delete_runs(selected, args.permanent)
self._new_meta_id()
self._sync_runs_meta(force=True)
try:
runs_impl.runs_op(
args,
None,
False,
preview,
confirm,
no_runs_help,
delete_f,
confirm_default=not args.permanent,
)
except SystemExit as e:
self._reraise_system_exit(e)
def _reraise_system_exit(self, e, deleted=False):
if not e.args[0]:
raise e
exit_code = e.args[1]
msg = e.args[0].replace(
"guild runs list",
"guild runs list %s-r %s" % (deleted and "-d " or "", self.name),
)
raise SystemExit(msg, exit_code)
def _delete_runs(self, runs, permanent):
for run in runs:
run_uri = self._s3_uri(*(RUNS_PATH + [run.id]))
if permanent:
self._s3_rm(run_uri)
else:
deleted_uri = self._s3_uri(*(DELETED_RUNS_PATH + [run.id]))
self._s3_mv(run_uri, deleted_uri)
def _s3_rm(self, uri):
rm_args = ["--recursive", uri]
self._s3_cmd("rm", rm_args)
def _s3_mv(self, src, dest):
mv_args = ["--recursive", src, dest]
self._s3_cmd("mv", mv_args)
def restore_runs(self, **opts):
self._sync_runs_meta()
args = click_util.Args(**opts)
args.archive = self._deleted_runs_dir
preview = "You are about to restore the following runs on %s:" % self.name
confirm = "Restore these runs?"
no_runs_help = "Nothing to restore."
def restore_f(selected):
self._restore_runs(selected)
self._new_meta_id()
self._sync_runs_meta(force=True)
try:
runs_impl.runs_op(
args,
None,
False,
preview,
confirm,
no_runs_help,
restore_f,
confirm_default=True,
)
except SystemExit as e:
self._reraise_system_exit(e, deleted=True)
def _restore_runs(self, runs):
for run in runs:
deleted_uri = self._s3_uri(*(DELETED_RUNS_PATH + [run.id]))
restored_uri = self._s3_uri(*(RUNS_PATH + [run.id]))
self._s3_mv(deleted_uri, restored_uri)
def purge_runs(self, **opts):
self._sync_runs_meta()
args = click_util.Args(**opts)
args.archive = self._deleted_runs_dir
preview = (
"WARNING: You are about to permanently delete "
"the following runs on %s:" % self.name
)
confirm = "Permanently delete these runs?"
no_runs_help = "Nothing to purge."
def purge_f(selected):
self._purge_runs(selected)
self._new_meta_id()
self._sync_runs_meta(force=True)
try:
runs_impl.runs_op(
args,
None,
False,
preview,
confirm,
no_runs_help,
purge_f,
confirm_default=False,
)
except SystemExit as e:
self._reraise_system_exit(e, deleted=True)
def _purge_runs(self, runs):
for run in runs:
uri = self._s3_uri(*(DELETED_RUNS_PATH + [run.id]))
self._s3_rm(uri)
def status(self, verbose=False):
try:
self._s3api_output("get-bucket-location", ["--bucket", self.bucket])
except remotelib.RemoteProcessError as e:
self._handle_status_error(e)
else:
sys.stdout.write(
"%s (S3 bucket %s) is available\n" % (self.name, self.bucket)
)
def _handle_status_error(self, e):
output = e.output.decode()
if "NoSuchBucket" in output:
raise remotelib.OperationError(
"%s is not available - %s does not exist" % (self.name, self.bucket)
)
else:
raise remotelib.OperationError(
"%s is not available: %s" % (self.name, output)
)
def start(self):
log.info("Creating S3 bucket %s", self.bucket)
try:
self._s3_cmd("mb", ["s3://%s" % self.bucket])
except remotelib.RemoteProcessError:
raise remotelib.OperationError()
def reinit(self):
self.start()
def stop(self):
log.info("Deleting S3 bucket %s", self.bucket)
try:
self._s3_cmd("rb", ["--force", "s3://%s" % self.bucket])
except remotelib.RemoteProcessError:
raise remotelib.OperationError()
def get_stop_details(self):
return "- S3 bucket %s will be deleted - THIS CANNOT BE UNDONE!" % self.bucket
def push(self, runs, delete=False):
for run in runs:
self._push_run(run, delete)
self._new_meta_id()
self._sync_runs_meta(force=True)
def _push_run(self, run, delete):
local_run_src = os.path.join(run.path, "")
remote_run_dest = self._s3_uri(*RUNS_PATH + [run.id]) + "/"
args = ["--no-follow-symlinks", local_run_src, remote_run_dest]
if delete:
args.insert(0, "--delete")
log.info("Copying %s to %s", run.id, self.name)
self._s3_cmd("sync", args)
def _new_meta_id(self):
meta_id = _uuid()
with util.TempFile("guild-s3-") as tmp:
with open(tmp.path, "w") as f:
f.write(meta_id)
args = [
"--bucket",
self.bucket,
"--key",
_join_path(self.root, "meta-id"),
"--body",
tmp.path,
]
self._s3api_output("put-object", args)
def pull(self, runs, delete=False):
for run in runs:
self._pull_run(run, delete)
def _pull_run(self, run, delete):
remote_run_src = self._s3_uri(*RUNS_PATH + [run.id]) + "/"
local_run_dest = os.path.join(var.runs_dir(), run.id, "")
args = [remote_run_src, local_run_dest]
if delete:
args.insert(0, "--delete")
log.info("Copying %s from %s", run.id, self.name)
self._s3_cmd("sync", args)
def label_runs(self, **opts):
raise NotImplementedError("TODO")
def run_info(self, **opts):
self._sync_runs_meta()
args = click_util.Args(**opts)
args.archive = self._runs_dir
args.remote = None
args.private_attrs = False
runs_impl.run_info(args, None)
def one_run(self, run_id_prefix):
raise NotImplementedError("TODO")
def run_op(self, opspec, flags, restart, no_wait, stage, **opts):
raise remotelib.OperationNotSupported()
def watch_run(self, **opts):
raise remotelib.OperationNotSupported()
def check(self, **opts):
raise remotelib.OperationNotSupported()
def stop_runs(self, **opts):
raise remotelib.OperationNotSupported()
def _init_env(env_config):
if isinstance(env_config, dict):
return env_config
elif isinstance(env_config, str):
return _env_from_file(env_config)
else:
log.warning("invalid value for remote env %r - ignoring", env_config)
return {}
def _env_from_file(path):
if path.lower().endswith(".gpg"):
env_str = _try_read_gpg(path)
else:
env_str = util.try_read(path)
if not env_str:
log.warning("cannot read remote env from %s - ignorning", path)
return {}
return _decode_env(env_str)
def _try_read_gpg(path):
path = os.path.expanduser(path)
cmd = _gpg_cmd() + [path]
try:
p = subprocess.Popen(
cmd, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except OSError as e:
log.error("cannot decode %s with command '%s' (%s)", path, " ".join(cmd), e)
else:
out, err = p.communicate()
if p.returncode != 0:
log.error(err.decode(errors="replace").strip())
return None
return out.decode(errors="replace")
def _gpg_cmd():
gpg_env = os.getenv("GPG_CMD")
if gpg_env:
return util.shlex_split(gpg_env)
return ["gpg", "-d"]
def _decode_env(s):
return dict([_split_env_line(line) for line in s.split("\n")])
def _split_env_line(s):
parts = s.split("=", 1)
if len(parts) == 1:
parts.append("")
return _strip_export(parts[0]), parts[1]
def _strip_export(s):
s = s.strip()
if s.startswith("export "):
s = s[7:]
return s
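# Illustrative decoding (hypothetical credentials):
#   _decode_env("export AWS_ACCESS_KEY_ID=abc\nAWS_SECRET_ACCESS_KEY=xyz")
#   -> {"AWS_ACCESS_KEY_ID": "abc", "AWS_SECRET_ACCESS_KEY": "xyz"}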
def _aws_cmd():
cmd = util.which("aws")
if not cmd:
raise remotelib.OperationError(
"AWS Command Line Interface (CLI) is not available\n"
"Refer to https://docs.aws.amazon.com/cli for help installing it."
)
return cmd
def _join_path(root, *parts):
path = [part for part in itertools.chain([root], parts) if part not in ("/", "")]
return "/".join(path)
def _subprocess_call(cmd, to_stderr, env):
if to_stderr:
_subprocess_call_to_stderr(cmd, env)
else:
subprocess.check_call(cmd, env=env)
def _subprocess_call_to_stderr(cmd, env):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
while True:
line = p.stdout.readline()
if not line:
break
sys.stderr.write(line.decode())
def _list(d):
try:
return os.listdir(d)
except OSError as e:
if e.errno != 2:
raise
return []
def _ids_for_prefixes(prefixes):
def strip(s):
if s.endswith("/"):
return s[:-1]
return s
return [strip(p) for p in prefixes]
def _uuid():
try:
return uuid.uuid1().hex
except ValueError:
# Workaround https://bugs.python.org/issue32502
return uuid.uuid4().hex
```
#### File: guildai/guild/view.py
```python
from __future__ import absolute_import
from __future__ import division
import logging
import os
import socket
import subprocess
import sys
import threading
import time
from werkzeug.exceptions import NotFound
from werkzeug.utils import redirect
from guild import serving_util
from guild import util
from guild import var
log = logging.getLogger("guild")
MODULE_DIR = os.path.dirname(__file__)
TB_RUNS_MONITOR_INTERVAL = 5
TB_REFRESH_INTERVAL = 5
class ViewData(object):
"""Interface for providing View related data."""
def runs(self):
"""Returns a list of unformatted runs.
"""
raise NotImplementedError()
def runs_data(self):
"""Returns a list of formatted runs data.
"""
raise NotImplementedError()
def one_run(self, run_id_prefix):
"""Returns one unformatted run for a run ID prefix.
The scope must be extended to all runs - not just runs per the
current filter.
If a run doesn't exist that matches `run_id_prefix` returns None.
"""
def one_run_data(self, run_id_prefix):
"""Returns a formatted run for a run ID prefix.
If a run doesn't exist that matches `run_id_prefix` returns None.
"""
raise NotImplementedError()
def config(self):
"""Returns dict of config for request params.
Config dict must contain:
cwd string Cwd used for runs
titleLabel string Label suitable for browser title
version string Guild version
"""
raise NotImplementedError()
class DevServer(threading.Thread):
def __init__(self, host, port, view_port):
super(DevServer, self).__init__()
self.host = host or socket.gethostname()
self.port = port
self.view_port = view_port
self._view_base_url = util.local_server_url(host, view_port)
self._ready = False
def run(self):
args = [
self._devserver_bin(),
"--host",
self.host,
"--config",
self._devserver_config(),
"--progress",
]
env = {
"HOST": self.host,
"PORT": str(self.port),
"VIEW_BASE": self._view_base_url,
"PATH": os.environ["PATH"],
}
p = subprocess.Popen(args, env=env)
p.wait()
def wait_for_ready(self):
url_base = util.local_server_url(self.host, self.port)
while not self._ready:
ping_url = "{}/assets/favicon.png".format(url_base)
try:
util.http_get(ping_url)
except util.HTTPConnectionError:
time.sleep(0.1)
else:
self._ready = True
@staticmethod
def _devserver_bin():
path = os.path.join(MODULE_DIR, "view/node_modules/.bin/webpack-dev-server")
if not os.path.exists(path):
raise AssertionError(
"{} does not exits - did you resolve node dependencies by "
"running npm install?".format(path)
)
return path
@staticmethod
def _devserver_config():
return os.path.join(MODULE_DIR, "view/build/webpack.dev.conf.js")
class TBServer(object):
def __init__(self, tensorboard, key, data):
self._tb = tensorboard
self._key = key
self._data = data
self.log_dir = None
self._monitor = None
self._app = None
self._started = False
@property
def running(self):
return self._started
def start(self):
if self._started:
raise RuntimeError("already started")
self.log_dir = util.mktempdir("guild-tensorboard-")
self._monitor = self._tb.RunsMonitor(
self.log_dir, self._list_runs, TB_RUNS_MONITOR_INTERVAL
)
self._monitor.run_once(exit_on_error=True)
self._monitor.start()
self._app = self._tb.create_app(
self.log_dir, TB_REFRESH_INTERVAL, path_prefix=self._path_prefix()
)
self._started = True
def _list_runs(self):
if self._key == "0":
return self._data.runs()
else:
run = self._data.one_run(self._key)
if not run:
return []
return [run]
def _path_prefix(self):
return "/tb/{}/".format(self._key)
def __call__(self, env, start_resp):
if not self.running:
raise RuntimeError("not started")
assert self._app
return self._app(env, start_resp)
def stop(self):
if not self._started:
raise RuntimeError("not started")
self._monitor.stop()
util.rmtempdir(self.log_dir)
class TBServers(object):
def __init__(self, data):
self._lock = threading.Lock()
self._servers = {}
self._data = data
self._tb = None
def __enter__(self):
self._lock.acquire()
def __exit__(self, *_exc):
self._lock.release()
def __getitem__(self, key):
return self._servers[key]
def start_server(self, key, _run=None):
tensorboard = self._ensure_tensorboard()
server = TBServer(tensorboard, key, self._data)
log.debug("starting TensorBoard server (%s)", server)
server.start()
self._servers[key] = server
log.debug(
"using log dir %s for TensorBoard server (%s)", server.log_dir, server
)
return server
def _ensure_tensorboard(self):
if self._tb is None:
from guild import tensorboard
self._tb = tensorboard
return self._tb
def iter_servers(self):
for key in self._servers:
yield self._servers[key]
def stop_servers(self):
for server in self._servers.values():
if server.running:
log.debug("stopping TensorBoard server (%s)", server)
server.stop()
class DistFiles(serving_util.StaticDir):
def __init__(self):
dist_dir = os.path.join(MODULE_DIR, "view/dist")
super(DistFiles, self).__init__(dist_dir)
class RunFiles(serving_util.StaticBase):
def __init__(self):
super(RunFiles, self).__init__({"/files": var.runs_dir()})
def handle(self, _req):
def app(env, start_resp0):
def start_resp(status, headers):
headers.append(("Access-Control-Allow-Origin", "*"))
start_resp0(status, headers)
return self._app(env, start_resp)
return app
class RunOutput(object):
def __init__(self):
self._output_run_id = None
self._output = None
def handle(self, req, run):
self._ensure_output(run)
start = req.args.get("s", None, int)
end = req.args.get("e", None, int)
lines = [
(time, stream, line) for time, stream, line in self._output.read(start, end)
]
return serving_util.json_resp(lines)
def _ensure_output(self, run_id):
if self._output_run_id == run_id:
return
run_dir = os.path.join(var.runs_dir(), run_id)
if not os.path.exists(run_dir):
raise NotFound()
self._output = util.RunOutputReader(run_dir)
self._output_run_id = run_id
def serve_forever(data, host, port, no_open=False, dev=False, logging=False):
if dev:
_serve_dev(data, host, port, no_open, logging)
else:
_serve_prod(data, host, port, no_open, logging)
def _serve_dev(data, host, port, no_open, logging):
view_port = util.free_port(port + 1)
dev_server = DevServer(host, port, view_port)
dev_server.start()
dev_server.wait_for_ready()
view_url = util.local_server_url(host, view_port)
if not no_open:
util.open_url(util.local_server_url(host, port))
sys.stdout.write(" I Guild View backend: {}\n".format(view_url))
_start_view(data, host, view_port, logging)
sys.stdout.write("\n")
def _serve_prod(data, host, port, no_open, logging):
view_url = util.local_server_url(host, port)
if not no_open:
try:
util.open_url(view_url)
except util.URLOpenError:
sys.stdout.write("Unable to open browser window for Guild View\n")
sys.stdout.write("Running Guild View at {}\n".format(view_url))
_start_view(data, host, port, logging)
sys.stdout.write("\n")
def _start_view(data, host, port, logging):
tb_servers = TBServers(data)
app = _view_app(data, tb_servers)
server = serving_util.make_server(host, port, app, logging)
sys.stdout.flush()
server.serve_forever()
tb_servers.stop_servers()
def _view_app(data, tb_servers):
dist_files = DistFiles()
run_files = RunFiles()
run_output = RunOutput()
routes = serving_util.Map(
[
("/runs", _handle_runs, (data,)),
("/compare", _handle_compare, (data,)),
("/files/<path:_>", run_files.handle, ()),
("/runs/<run>/output", run_output.handle, ()),
("/config", _handle_config, (data,)),
("/tb/", _route_tb, ()),
("/tb/<key>/", _handle_tb_index, (tb_servers, data)),
("/tb/<key>/<path:_>", _handle_tb, (tb_servers,)),
("/", dist_files.handle_index, ()),
("/<path:_>", dist_files.handle, ()),
]
)
return serving_util.App(routes)
def _handle_runs(req, data):
runs_data = _runs_data(req, data)
return serving_util.json_resp(runs_data)
def _runs_data(req, data):
try:
run_id_prefix = req.args["run"]
except KeyError:
return data.runs_data()
else:
data = data.one_run_data(run_id_prefix)
if not data:
raise NotFound()
return [data]
def _handle_compare(_req, data):
compare_data = data.compare_data()
return serving_util.json_resp(compare_data)
def _handle_config(_req, data):
return serving_util.json_resp(data.config())
def _route_tb(req):
if "run" in req.args:
key = req.args["run"]
else:
key = "0"
return redirect("/tb/{}/".format(key), code=303)
def _handle_tb_index(req, tb_servers, data, key):
try:
return _handle_tb(req, tb_servers, key)
except NotFound:
if key != "0":
key = _try_run_id(key, data)
with tb_servers:
return tb_servers.start_server(key)
def _handle_tb(_req, tb_servers, key):
with tb_servers:
try:
return tb_servers[key]
except KeyError:
raise NotFound()
def _try_run_id(key, data):
run = data.one_run(key)
if not run:
raise NotFound()
return run.id
``` |
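The `RunFiles.handle` method above is an instance of a general WSGI idiom: wrap the `start_response` callable so extra headers can be injected without modifying the wrapped application. A minimal, self-contained sketch of that idiom (the `inner_app` below is a stand-in for illustration, not part of Guild):
```python
def add_cors(wrapped_app):
    """Wrap a WSGI app so every response carries a permissive CORS header."""
    def app(environ, start_response_outer):
        def start_response(status, headers, exc_info=None):
            headers.append(("Access-Control-Allow-Origin", "*"))
            return start_response_outer(status, headers, exc_info)
        return wrapped_app(environ, start_response)
    return app

def inner_app(environ, start_response):
    # stand-in application: always answers with a small plain-text body
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

wrapped = add_cors(inner_app)  # any WSGI server can now serve `wrapped`
```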
{
"source": "jimhs/snippets",
"score": 3
} |
#### File: algo/arrays/missing_ranges.py
```python
def mr(a, p, q):
"""
in:
a: sorted 1d array
p: low end
q: high end
    out:
        r: list of (start, end) tuples covering the values missing from [p, q]
"""
r = []
s = p
a = sorted(a)
if a == []:
return [(p, q)]
for i in a:
        if s == i:
            # value is present, step past it
            s += 1
        elif i > s:
            # gap found: s through i-1 are missing
            r.append((s, i - 1))
            s = i + 1
        # values below s (duplicates or values < p) are skipped
    # handle the trailing range after the last element
if s <= q:
r.append((s, q))
return r
```
#### File: algo/arrays/summary_ranges.py
```python
def sr(a):
"""
in:
a: 1d array
ou:
        list of (start, end) range tuples
"""
r = []
i = 0
    # enter the loop directly; a 1-element list needs no special case
while i < len(a):
n = a[i]
        # while within bounds and the next value is consecutive
while i + 1 < len(a) and a[i + 1] - a[i] == 1:
i += 1
if a[i] != n:
r.append((n, a[i]))
else:
r.append((n, n))
i += 1
return r
```
#### File: python/cookbook/meta.py
```python
import weakref
import logging
import types
import abc
import operator
import sys
import collections.abc
import bisect
from collections import OrderedDict
from inspect import Signature, Parameter, signature
from time import localtime
#from decorator import LazyProperty
# cookbook 9.13
class NoInstance(type):
def __call__(self, *args, **kwargs):
        raise TypeError("can't instantiate directly")
class Singleton(type):
def __init__(self, *args, **kwargs):
self.__instance = None
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
if self.__instance is None:
self.__instance = super().__call__(*args, **kwargs)
return self.__instance
else:
return self.__instance
class Cached(type):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__cache = weakref.WeakValueDictionary()
def __call__(self, *args):
if args in self.__cache:
return self.__cache[args]
else:
obj = super().__call__(*args)
self.__cache[args] = obj
return obj
class Spam(metaclass=Cached):
def __init__(self, name):
print('creating spam({!r})'.format(name))
self.name = name
@staticmethod
def grok(x):
print('spam.grok:',x)
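# Rough sketch of the expected caching behaviour of Spam (illustration only):
#   a = Spam('Guido')   # prints "creating spam('Guido')"
#   b = Spam('Guido')   # no print: the weakly-cached instance is reused
#   a is b              # -> True
#   c = Spam('Diana')   # prints again: different args create a new cache entry
#   del a, b            # with CPython refcounting the weak cache entry is dropped,
#                       # so a later Spam('Guido') would construct (and print) again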
# cookbook 8.13
# base class, use a descriptor to set value
class Descriptor:
def __init__(self, name=None, **opts):
self.name = name
for key, value in opts.items():
setattr(self, key, value)
def __set__(self, instance, value):
instance.__dict__[self.name] = value
class Typed(Descriptor):
_expected_type = type(None)
def __set__(self, instance, value):
if not isinstance(value, self._expected_type):
raise TypeError('expected '+ str(self._expected_type))
super().__set__(instance, value)
# descriptors for enforcing value
class Unsigned(Descriptor):
def __set__(self, instance, value):
if value < 0 :
raise ValueError('expect >= 0')
super().__set__(instance, value)
class MaxSized(Descriptor):
def __init__(self, name=None, **opts):
if 'size' not in opts:
raise TypeError("missing 'size' option")
super().__init__(name, **opts)
def __set__(self, instance, value):
if len(value) > self.size :
raise ValueError('size must be <= ' + str(self.size))
super().__set__(instance, value)
# cookbook 9.14
# a set of descriptors for various types
#class Typed:
# _expected_type = type(None)
# def __init__(self, name=None):
# self._name = name
#
# def __set__(self, instance, value):
# if not isinstance(value, self._expected_type):
# raise TypeError('expected '+ str(self._expected_type))
# instance.__dict__[self._name] = value
#OPTIMIZE page 284~287 include some other similar methods
class Integer(Typed):
    _expected_type = int
class Float(Typed):
    _expected_type = float
class String(Typed):
    _expected_type = str
class UnsignedInteger(Integer, Unsigned):
    pass
class UnsignedFloat(Float, Unsigned):
pass
class SizedString(String, MaxSized):
pass
# metaclass that use an OrderedDict for class body
class OrderedMeta(type):
def __new__(cls, clsname, bases, clsdict):
d = dict(clsdict)
order = []
for name, value in clsdict.items():
if isinstance(value, Typed):
value._name = name
order.append(name)
d['_order'] = order
return type.__new__(cls, clsname, bases, d)
@classmethod
def __prepare__(cls, clsname, bases):
return OrderedDict()
class Structure(metaclass=OrderedMeta):
def as_csv(self):
return ','.join(str(getattr(self, name)) for name in self._order)
_stock_formats = {
'str_' : '{0.share} shares of {0.name} @{0.price}',
'repr_' : 'Stock({0.name!r}, {0.share!r}, {0.price!r})',
'desc_' : 'name={0.name!r}, share={0.share}, price={0.price}'
}
class Stock(Structure):
# name = String()
    # share = Integer()
# price = Float()
name = SizedString('name', size=4)
    share = UnsignedInteger('share')
price = UnsignedFloat('price')
#??? 'name' in __slots__ conflicts with class variable
# __slots__ = ['name', 'share', 'price']
def __init__(self, name, share, price):
self.name = name
self.share = share
self.price = price
# 8.1
def __str__(self):
return _stock_formats['str_'].format(self)
#??? eval(repr(s)) == s, is False
def __repr__(self):
return _stock_formats['repr_'].format(self)
def __format__(self, code):
if code == '':
code = 'desc_'
fmt = _stock_formats[code]
return fmt.format(self)
# @decorator.lazyproperty
@property
def amo(self):
print('calculating amo...')
return self.share * self.price
# cookbook 9.14
# prevents duplicated method def
class NoDupOrderedDict(OrderedDict):
def __init__(self, clsname):
self.clsname = clsname
super().__init__()
def __setitem__(self, name, value):
if name in self:
raise TypeError('{} already defined in {}'.format(name, self.clsname))
super().__setitem__(name, value)
class OrderedMeta(type):
def __new__(cls, clsname, bases, clsdict):
d= dict(clsdict)
d['_order'] = [name for name in clsdict if name[0] != '_']
return type.__new__(cls, clsname, bases, d)
@classmethod
def __prepare__(cls, clsname, bases):
return NoDupOrderedDict(clsname)
# duplicated example
class DupA(metaclass=OrderedMeta):
def spam(self):
pass
def spam2(self):
pass
# cookbook 9.16
# fixed parm signature
# make a signature for a func(x, y=42, *, z=None)
parms = [Parameter('x', Parameter.POSITIONAL_OR_KEYWORD),
Parameter('y', Parameter.POSITIONAL_OR_KEYWORD, default=42),
Parameter('z', Parameter.KEYWORD_ONLY, default=None)]
sig = Signature(parms)
# print(sig)
def func(*args, **kwargs):
bound_value = sig.bind(*args, **kwargs)
for name, value in bound_value.arguments.items():
print(name, value)
# cookbook 9.16
# fixed parm signature 2
def make_sig(*names):
parms = [Parameter(name, Parameter.POSITIONAL_OR_KEYWORD)
for name in names]
return Signature(parms)
class SigStructure:
__signature__ = make_sig()
def __init__(self, *args, **kwargs):
bound_value = self.__signature__.bind(*args, **kwargs)
for name, value in bound_value.arguments.items():
setattr(self, name, value)
# example
class SigStock(SigStructure):
__signature__ = make_sig('name', 'share', 'price')
class SigPoint(SigStructure):
__signature__ = make_sig('x', 'y')
# cookbook 9.17
class NoMixedCaseMeta(type):
def __new__(cls, clsname, bases, clsdict):
for name in clsdict:
if name.lower() != name:
raise TypeError('bad attribute name ' + name)
return super().__new__(cls, clsname, bases, clsdict)
class NMCMRoot(metaclass=NoMixedCaseMeta):
pass
class NMCM_A(NMCMRoot):
def foo_bar(self):
pass
'''
class NMCM_B(NMCMRoot):
def fooBar(self):
pass
'''
class MatchSignatureMeta(type):
def __init__(self, clsname, bases, clsdict):
super().__init__(clsname, bases, clsdict)
sup = super(self, self)
for name, value in clsdict.items():
if name.startswith('_') or not callable(value):
continue
# get the prev def and compare the sig
prev_dfn = getattr(sup, name, None)
if prev_dfn:
prev_sig = signature(prev_dfn)
val_sig = signature(value)
if prev_sig != val_sig:
logging.warning('signature mismatch in %s: %s != %s',
value.__qualname__, prev_sig, val_sig)
# example
class MSMRoot(metaclass=MatchSignatureMeta):
pass
class MSM_A(MSMRoot):
def foo(self, x, y):
pass
def bar(self, x, *, z):
pass
class MSM_B(MSM_A):
def foo(self, a, b):
pass
def bar(self, x, z):
pass
# cookbook 9.18
# making a class manually through parts
# method
def __init__(self, name, share, price):
self.name = name
self.share = share
self.price = price
def cost(self):
return self.share * self.price
cls_dict = {
'__init__' : __init__,
'cost' : cost,
}
# made a class
Stock2 = types.new_class('Stock2',
(),
{'metaclass': abc.ABCMeta},
lambda ns: ns.update(cls_dict))
Stock2.__module__ = __name__
# making a class using named tuple and frame hack
def named_tuple(classname, fieldname):
# populate a dictionary of field property accessors
cls_dict = {name: property(operator.itemgetter(n))
for n, name in enumerate(fieldname)}
# make a __new__ function and add to class dict
def __new__(cls, *args):
if len(args) != len(fieldname):
raise TypeError('expect {} arguments'.format(len(fieldname)))
return tuple.__new__(cls, args)
cls_dict['__new__'] = __new__
# make the class
cls = types.new_class(classname,
(tuple,),
{},
lambda ns: ns.update(cls_dict))
# set the module to that of the caller
    cls.__module__ = sys._getframe(1).f_globals['__name__']
return cls
# initiate class members when define
class StructTupleMeta(type):
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
for n, name in enumerate(cls._fields):
setattr(cls, name, property(operator.itemgetter(n)))
class StructTuple(tuple, metaclass= StructTupleMeta):
_fields = []
def __new__(cls, *args):
if len(args) != len(cls._fields):
raise ValueError('expect {} arguments'.format(len(cls._fields)))
return super().__new__(cls, args)
class Stock3(StructTuple):
_fields = ['name', 'share', 'price']
class Point(StructTuple):
_fields = ['x', 'y']
# cookbook 9.21
# avoid duplicated type check
def typed_property(name, expected_type):
storage_name = '_' + name
@property
def prop(self):
return getattr(self, storage_name)
@prop.setter
def prop(self, value):
if not isinstance(value, expected_type):
raise TypeError('{} must be {}'.format(name, expected_type))
setattr(self, storage_name, value)
return prop
# example
class Person:
name = typed_property('name', str)
age = typed_property('age', int)
def __init__(self, name, age):
self.name = name
self.age = age
# cookbook 8.14
# customized container
class SortedItems(collections.abc.Sequence):
def __init__(self, initial=None):
self._items = sorted(initial) if initial is not None else []
# required sequence methods
def __getitem__(self, index):
return self._items[index]
def __len__(self):
return len(self._items)
# method for adding item to right location
def add(self, item):
bisect.insort(self._items, item)
# cookbook 8.15
# proxy and delegate
# basic structure
class Delegate_From:
def foo(self, x):
print(self.__class__)
pass
def bar(self):
print(self.__class__)
pass
class Delegate_To:
def __init__(self):
self._a = Delegate_From()
def spam(self):
pass
def __getattr__(self, name):
return getattr(self._a, name)
# a proxy class that wraps around another obj,
# but only exposes its public attributes
class Proxy:
def __init__(self, obj):
self._obj = obj
# delegate attr lookup to internal obj
def __getattr__(self, name):
print('getting attr:', name)
return getattr(self._obj, name)
# delegate attr assignment
def __setattr__(self, name, value):
if name.startswith('_'):
super().__setattr__(name, value)
else:
print('setting attr:', name, value)
setattr(self._obj, name, value)
# delegate attr deletion
def __delattr__(self, name):
if name.startswith('_'):
super().__delattr__(name)
else:
print('deleting attr:', name)
delattr(self._obj, name)
# cookbook 8.17
# de-serializing
class Date:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
@classmethod
def today(cls):
d = cls.__new__(cls)
t = localtime()
d.year = t.tm_year
d.month = t.tm_mon
d.day = t.tm_mday
return d
# cookbook 8.18
# Mixin
``` |
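Of the recipes collected above, `typed_property` is the easiest to exercise in isolation. A small usage sketch, assuming the file is importable as `meta` (importing it also triggers the intentional signature-mismatch warnings from the `MSM_*` example classes):
```python
from meta import Person

p = Person("Ada", 36)
print(p.name, p.age)      # Ada 36

try:
    p.age = "old"         # the generated setter enforces the expected type
except TypeError as err:
    print(err)            # age must be <class 'int'>
```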
{
"source": "jimialex/django-wise-template-mysql",
"score": 2
} |
#### File: v1/serializers/password.py
```python
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import Serializer
from apps.accounts import response_codes
from apps.accounts.api.v1.serializers.login import UsernameOrEmailSerializer
from apps.accounts.models import User
from django.utils.translation import ugettext_lazy as _
PASSWORD_MAX_LENGTH = User._meta.get_field('password').max_length # noqa: WPS437
user_read_only_fields = (
'id', 'username', 'date_joined', 'last_login', 'new_email',
'password', 'is_superuser', 'is_staff', 'is_active', 'date_joined',
'email_token', 'token', 'groups', 'user_permissions',
)
class CheckValidPasswordMixin(serializers.Serializer):
"""Validates a password."""
password = serializers.CharField(
help_text=_('<PASSWORD>'),
max_length=PASSWORD_MAX_LENGTH,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request = self.context.get('request', None)
self.user = getattr(self.request, 'user', None)
def validate_password(self, password):
if not self.user.check_password(password):
raise ValidationError(**response_codes.INVALID_PASSWORD)
return password
class PasswordSetSerializer(serializers.Serializer):
"""Validates a password and its confirmation."""
password = serializers.CharField(
help_text=_('<PASSWORD>'),
max_length=PASSWORD_MAX_LENGTH,
)
confirm_password = serializers.CharField(
help_text=_('<PASSWORD>'),
max_length=PASSWORD_MAX_LENGTH,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = self.context['request'].user
def validate_password(self, password): # noqa: D102
if self.user.has_usable_password():
raise ValidationError(**response_codes.USER_HAS_PASSWORD)
return password
def validate(self, attrs): # noqa: D102
attrs = super().validate(attrs)
if attrs['password'] != attrs['confirm_password']:
raise ValidationError(**response_codes.PASSWORD_MISTMATCH)
return attrs
class PasswordUpdateSerializer(CheckValidPasswordMixin):
"""Validates a new password and its confirmation."""
new_password = serializers.CharField(
help_text=_('<PASSWORD>'),
max_length=PASSWORD_MAX_LENGTH,
)
confirm_password = serializers.CharField(
help_text=_('<PASSWORD>'),
max_length=PASSWORD_MAX_LENGTH,
)
def validate(self, attrs): # noqa: D102
attrs = super().validate(attrs)
# it's repeated for readability
if attrs['new_password'] != attrs['confirm_password']:
raise ValidationError(**response_codes.PASSWORD_MISTMATCH)
return attrs
class PasswordResetSerializer(UsernameOrEmailSerializer):
"""Serializer to request a password reset e-mail."""
redirect_uri = serializers.URLField(required=False)
class PasswordResetConfirmSerializer(Serializer):
"""Serializer to request and validate password."""
DEFAULT_PASSWORD_LENGTH = 128
token = serializers.CharField()
password = serializers.CharField(max_length=DEFAULT_PASSWORD_LENGTH)
```
#### File: functional/api_views/test_google_login.py
```python
import pytest
from django.urls import reverse
from doubles import allow
from rest_framework import status
from rest_framework.exceptions import NotAuthenticated
from apps.accounts.api.v1.serializers.session import SessionSerializer
from apps.accounts.response_codes import INVALID_GOOGLE_TOKEN_ID, INVALID_GOOGLE_TOKEN_ISSUER
from apps.accounts.services.session import SessionService
@pytest.mark.django_db
class GoogleLoginTests:
google_login_url = reverse('api-accounts:v1:google-login')
def test_missing_token(self, api_client):
response = api_client.post(self.google_login_url, {})
response_json = response.json()
assert 'token' in response_json
assert response_json['token'][0]['code'] == 'required'
def test_valid_token(self, api_client, test_user):
allow(SessionService).process_google_token.and_return(test_user)
login_data = {'token': 'valid_<PASSWORD>'}
response = api_client.post(self.google_login_url, login_data)
response_json = response.json()
assert response.status_code == status.HTTP_200_OK
user_data = SessionSerializer(test_user).data
assert response_json.keys() == user_data.keys()
def test_invalid_token(self, api_client):
allow(SessionService).process_google_token.and_raise(
NotAuthenticated(**INVALID_GOOGLE_TOKEN_ID)
)
login_data = {'token': '<PASSWORD>'}
response = api_client.post(self.google_login_url, login_data)
response_json = response.json()
assert response.status_code == status.HTTP_401_UNAUTHORIZED
assert response_json.get('code') == INVALID_GOOGLE_TOKEN_ID.get('code')
def test_invalid_issuer(self, api_client):
allow(SessionService).process_google_token.and_raise(
NotAuthenticated(**INVALID_GOOGLE_TOKEN_ISSUER)
)
login_data = {'token': 'invalid_token'}
response = api_client.post(self.google_login_url, data=login_data)
response_json = response.json()
assert response.status_code == status.HTTP_401_UNAUTHORIZED
assert response_json.get('code') == INVALID_GOOGLE_TOKEN_ISSUER.get('code')
```
#### File: unit/services/test_session.py
```python
import json
import pytest
import requests_mock
from doubles import allow
from rest_framework import status
from rest_framework.exceptions import NotAuthenticated
from apps.accounts.models import User
from apps.accounts.response_codes import (
INVALID_GOOGLE_TOKEN_ISSUER,
INVALID_GOOGLE_TOKEN_ID,
INVALID_CREDENTIALS,
INACTIVE_ACCOUNT, INVALID_FACEBOOK_ACCESS_TOKEN,
)
from apps.accounts.services.session import SessionService
from apps.accounts.services.user import UserService
from google.oauth2 import id_token
@pytest.mark.django_db
class SessionServiceTests:
@staticmethod
def test_process_google_token(test_user):
allow(id_token).verify_oauth2_token.and_return({
'iss': SessionService.GOOGLE_ACCOUNTS_URL,
})
allow(UserService).create_or_update_for_social_networks.and_return(test_user)
user = SessionService.process_google_token('valid_token')
assert user is not None
assert isinstance(user, User)
@staticmethod
def test_process_google_token_invalid_issuer():
allow(id_token).verify_oauth2_token.and_return({
'iss': 'https://any.server',
})
with pytest.raises(NotAuthenticated) as exec_info:
SessionService.process_google_token('valid_token')
assert exec_info.value.detail.code == INVALID_GOOGLE_TOKEN_ISSUER['code']
@staticmethod
def test_process_google_token_invalid_token():
allow(id_token).verify_oauth2_token.and_raise(ValueError('Token Error'))
with pytest.raises(NotAuthenticated) as exec_info:
SessionService.process_google_token('valid_token')
assert exec_info.value.detail.code == INVALID_GOOGLE_TOKEN_ID['code']
@staticmethod
def test_process_facebook_valid_access_token(test_user):
allow(UserService).create_or_update_for_social_networks.and_return(test_user)
access_token = 'valid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text=json.dumps({
'email': test_user.email,
'first_name': test_user.first_name,
'last_name': test_user.last_name,
}),
status_code=status.HTTP_200_OK,
)
user = SessionService.process_facebook_token(access_token)
assert user is not None
assert isinstance(user, User)
@staticmethod
def test_process_facebook_token_invalid_access_token():
access_token = 'invalid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text=json.dumps({'error': 'facebook_raised_error'}),
status_code=status.HTTP_200_OK,
)
with pytest.raises(NotAuthenticated) as exec_info:
SessionService.process_facebook_token(access_token)
assert exec_info.value.detail.code == INVALID_FACEBOOK_ACCESS_TOKEN['code']
@staticmethod
def test_process_facebook_token_invalid_access_token_from_format(test_user):
access_token = 'invalid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text='',
status_code=status.HTTP_200_OK,
)
with pytest.raises(NotAuthenticated) as exec_info:
SessionService.process_facebook_token(access_token)
assert exec_info.value.detail.code == INVALID_FACEBOOK_ACCESS_TOKEN['code']
@staticmethod
def test_make_user_session(test_user):
session = SessionService.make_user_session(test_user)
assert 'access_token' in session
assert 'refresh_token' in session
@staticmethod
def test_validate_session(test_user):
plain_password = '<PASSWORD>'
test_user.set_password(<PASSWORD>_password)
test_user.save()
assert SessionService.validate_session(test_user, plain_password)
@staticmethod
def test_validate_session_invalid_credentials(test_user):
with pytest.raises(NotAuthenticated) as exec_info:
SessionService.validate_session(None, 'new_password')
assert exec_info.value.detail.code == INVALID_CREDENTIALS['code']
with pytest.raises(NotAuthenticated) as exec_info:
SessionService.validate_session(test_user, 'new_password')
assert exec_info.value.detail.code == INVALID_CREDENTIALS['code']
@staticmethod
def test_validate_session_inactive_account(test_user):
plain_password = '<PASSWORD>'
test_user.set_password(<PASSWORD>)
test_user.is_active = False
test_user.save()
with pytest.raises(NotAuthenticated) as exec_info:
SessionService.validate_session(test_user, plain_password)
assert exec_info.value.detail.code == INACTIVE_ACCOUNT['code']
```
#### File: accounts/views/reset_password.py
```python
from django.views import View
from django.shortcuts import render
from apps.accounts.forms import ResetPasswordForm
from apps.accounts.models.choices import ActionCategory
from apps.accounts.models.pending_action import PendingAction
class ResetPasswordView(View):
"""Process a password reset."""
def get(self, request, token, **kwargs):
"""Renders the html template to init password reset."""
context = {}
try:
context['pending_action'] = PendingAction.objects.get(
token=token, category=ActionCategory.RESET_PASSWORD.value
)
except PendingAction.DoesNotExist:
context['pending_action'] = None
return render(request, 'transactions/reset_password.html', context)
def post(self, request, token, **kwargs):
"""Processes password reset."""
context = {}
try:
pending_action = PendingAction.objects.get(
token=token, category=ActionCategory.RESET_PASSWORD.value
)
context['pending_action'] = pending_action
user = pending_action.user
form = ResetPasswordForm(data=request.POST)
context['form'] = form
if form.is_valid():
password = form.cleaned_data['<PASSWORD>']
user.set_password(password)
user.save()
pending_action.delete()
return render(request, 'transactions/reset_password_done.html', context)
except PendingAction.DoesNotExist:
context['pending_action'] = None
return render(request, 'transactions/reset_password.html', context)
```
#### File: contrib/api/responses.py
```python
from rest_framework import status as status_code
from rest_framework.response import Response
from django.utils.translation import ugettext_lazy as _
class DoneResponse(Response): # noqa: D107
"""Base class for REST Exceptions based on CEH from @vicobits."""
def __init__(self, detail=None, code=None, status=None): # noqa: D107
response = {
'message': detail if detail else _('Successful operation!'),
'code': code if code else 'successful_action',
}
status = status or status_code.HTTP_200_OK
super().__init__(data=response, status=status)
```
#### File: unit/api/test_exceptions.py
```python
from rest_framework.exceptions import NotAuthenticated, ValidationError
from rest_framework_simplejwt.exceptions import InvalidToken, AuthenticationFailed
from apps.contrib.api.exceptions import SimpleJWTExceptionParser
def test_invalid_token_format():
received_exc = InvalidToken()
exc = SimpleJWTExceptionParser.parse(received_exc)
assert isinstance(exc, NotAuthenticated)
def test_authentication_failed_format():
received_exc = AuthenticationFailed()
exc = SimpleJWTExceptionParser.parse(received_exc)
assert isinstance(exc, NotAuthenticated)
def test_another_exception_format():
received_exc = ValidationError()
exc = SimpleJWTExceptionParser.parse(received_exc)
assert type(received_exc) == type(exc)
```
#### File: tests/unit/test_logger.py
```python
from apps.contrib.logging import Logger
class LoggerTests:
message = 'ANYTHING'
def test_debug(self):
assert Logger.debug(self.message) is None
def test_info(self):
assert Logger.info(self.message) is None
def test_error(self):
assert Logger.error(self.message) is None
def test_warning(self):
assert Logger.warning(self.message) is None
```
#### File: utils/testing/decorators.py
```python
from contextlib import contextmanager
@contextmanager
def temporarily(obj, **kwargs):
original_values = {k: getattr(obj, k) for k in kwargs}
for k, v in kwargs.items():
setattr(obj, k, v)
obj.save(update_fields=kwargs.keys())
try:
yield
finally:
for k, v in original_values.items():
setattr(obj, k, v)
obj.save(update_fields=original_values.keys())
``` |
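A usage sketch for `temporarily`; `user` below stands for any saved Django model instance (for example the `test_user` fixture used in the tests above), and the field names are illustrative:
```python
# from <project>.utils.testing.decorators import temporarily  # exact path depends on project layout

# Flip fields for the duration of a block; the original values are written
# back (and saved again) when the block exits, even if it raises.
with temporarily(user, is_active=False, first_name="Temp"):
    assert user.is_active is False
    # ... exercise code paths that depend on the temporary state ...
# here user.is_active and user.first_name are back to their original values
```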
{
"source": "jimi-c/ansible_demo_website",
"score": 2
} |
#### File: ansible_demo_website/main/views.py
```python
from __future__ import unicode_literals
from django.contrib.auth import login as django_login, logout as django_logout
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.decorators.cache import cache_page
# Create your views here.
def get_base_context():
context = dict()
return context
def index(request):
context = get_base_context()
return render(request, 'main.html', context)
def register_user(request):
username = request.GET.get('username', None)
if username is not None:
new_user, created = User.objects.get_or_create(username=username)
if created:
new_user.first_name = request.GET.get('first_name', '')
new_user.last_name = request.GET.get('last_name', '')
new_user.save()
return redirect('/')
def login(request):
username = request.GET.get('username', 'admin')
user = User.objects.get(username=username)
django_login(request, user)
return redirect('/')
def logout(request):
django_logout(request)
return redirect('/')
def private_page(request):
if not request.user.is_authenticated():
raise PermissionDenied
context = get_base_context()
return render(request, 'private.html', context)
``` |
{
"source": "jimichailidis/UVA_AML18",
"score": 3
} |
#### File: UVA_AML18/week_3/cnn.py
```python
import os
print(os.getcwd())
from Blocks import ReLU, SequentialNN, Dense, Hinge, SGD
from dataset_utils import load_mnist
import numpy as np
from convolution_layer import ConvLayer
from maxpool_layer import MaxPool2x2
from flatten_layer import FlattenLayer
import sys
def iterate_minibatches(x, y, batch_size=16, verbose=True):
assert len(x) == len(y)
indices = np.arange(len(x))
np.random.shuffle(indices)
for i, start_idx in enumerate(range(0, len(x) - batch_size + 1, batch_size)):
if verbose:
print('\rBatch: {}/{}'.format(i + 1, len(x) // batch_size), end='')
sys.stdout.flush()
excerpt = indices[start_idx:start_idx + batch_size]
yield x[excerpt], y[excerpt]
def get_cnn():
nn = SequentialNN()
nn.add(ConvLayer(1, 2, filter_size=3)) # The output is of size N_obj 2 28 28
nn.add(ReLU()) # The output is of size N_obj 2 28 28
nn.add(MaxPool2x2()) # The output is of size N_obj 2 14 14
nn.add(ConvLayer(2, 4, filter_size=3)) # The output is of size N_obj 4 14 14
nn.add(ReLU()) # The output is of size N_obj 4 14 14
nn.add(MaxPool2x2()) # The output is of size N_obj 4 7 7
nn.add(FlattenLayer()) # The output is of size N_obj 196
nn.add(Dense(4 * 7 * 7, 32))
nn.add(ReLU())
nn.add(Dense(32, 1))
return nn
nn = get_cnn()
loss = Hinge()
optimizer = SGD(nn)
train = list(load_mnist(dataset='training', path='.'))
train_images = np.array([im[1] for im in train])
train_targets = np.array([im[0] for im in train])
# We will train a 0 vs. 1 classifier
x_train = train_images[train_targets < 2][:1000]
y_train = train_targets[train_targets < 2][:1000]
y_train = y_train * 2 - 1
y_train = y_train.reshape((-1, 1))
x_train = x_train.astype('float32') / 255.0
x_train = x_train.reshape((-1, 1, 28, 28))
# It will train for about 5 minutes
num_epochs = 3
batch_size = 32
# We will store the results here
history = {'loss': [], 'accuracy': []}
# `num_epochs` is the number of full passes over the training data
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch + 1, num_epochs))
    # Iterate over the mini-batches one at a time
for x_batch, y_batch in iterate_minibatches(x_train, y_train, batch_size):
# Predict the target value
y_pred = nn.forward(x_batch)
# Compute the gradient of the loss
loss_grad = loss.backward(y_pred, y_batch)
# Perform backwards pass
nn.backward(x_batch, loss_grad)
# Update the params
optimizer.update_params()
# Save loss and accuracy values
history['loss'].append(loss.forward(y_pred, y_batch))
prediction_is_correct = (y_pred > 0) == (y_batch > 0)
history['accuracy'].append(np.mean(prediction_is_correct))
print()
#%%
import matplotlib.pyplot as plt
# Let's plot the results to get a better insight
plt.figure(figsize=(8, 5))
ax_1 = plt.subplot()
ax_1.plot(history['loss'], c='g', lw=2, label='train loss')
ax_1.set_ylabel('loss', fontsize=16)
ax_1.set_xlabel('#batches', fontsize=16)
ax_2 = plt.twinx(ax_1)
ax_2.plot(history['accuracy'], lw=3, label='train accuracy')
ax_2.set_ylabel('accuracy', fontsize=16)
```
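The layer-size comments in `get_cnn` follow directly from the pooling arithmetic: each `MaxPool2x2` halves both spatial dimensions, so 28x28 becomes 14x14 and then 7x7, which is why the first dense layer takes `4 * 7 * 7 = 196` inputs. A quick self-contained check of that arithmetic:
```python
h = w = 28              # MNIST input size
channels = 4            # channels after the second ConvLayer
for _ in range(2):      # two MaxPool2x2 layers
    h, w = h // 2, w // 2
print(h, w, channels * h * w)   # 7 7 196, matching Dense(4 * 7 * 7, 32)
```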
#### File: UVA_AML18/week_3/flatten_layer.py
```python
from Blocks import Layer
def flatten_forward(x_input):
"""Perform the reshaping of the tensor of size `(K, L, M, N)`
to the tensor of size `(K, L*M*N)`
# Arguments
x_input: np.array of size `(K, L, M, N)`
# Output
output: np.array of size `(K, L*M*N)`
"""
K, L, M, N = x_input.shape
output = x_input.reshape(K, L*M*N)
return output
def flatten_grad_input(x_input, grad_output):
"""Calculate partial derivative of the loss with respect to the input
# Arguments
        x_input: the original input of the layer
            np.array of size `(K, L, M, N)`
        grad_output: partial derivative of the loss
            with respect to the output
            np.array of size `(K, L*M*N)`
    # Output
        grad_input: partial derivative of the loss
            with respect to the input
            np.array of size `(K, L, M, N)`
"""
K, L, M, N = x_input.shape
grad_input = grad_output.reshape(K, L, M, N)
return grad_input
class FlattenLayer(Layer):
def forward(self, x_input):
self.output = flatten_forward(x_input)
return self.output
def backward(self, x_input, grad_output):
output = flatten_grad_input(x_input, grad_output)
return output
``` |
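A small NumPy check of the reshaping contract described in the docstrings; it mirrors `flatten_forward` and `flatten_grad_input` with plain `reshape` calls rather than importing the module:
```python
import numpy as np

x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)   # (K, L, M, N)
flat = x.reshape(x.shape[0], -1)                   # forward: (K, L*M*N)
grad_output = np.ones_like(flat, dtype=float)      # pretend upstream gradient
grad_input = grad_output.reshape(x.shape)          # backward: back to (K, L, M, N)
assert flat.shape == (2, 60) and grad_input.shape == x.shape
```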
{
"source": "jimi-c/receptor",
"score": 3
} |
#### File: receptor/receptor/node.py
```python
import asyncio
import logging
from .protocol import BasicProtocol, create_peer
logger = logging.getLogger(__name__)
def mainloop(receptor, ping_interval=None, loop=asyncio.get_event_loop(), skip_run=False):
config = receptor.config
if config.server.server_enable:
listener = loop.create_server(
lambda: BasicProtocol(receptor, loop),
config.server.address, config.server.port, ssl=config.get_server_ssl_context())
loop.create_task(listener)
logger.info("Serving on %s:%s", config.server.address, config.server.port)
for peer in config.peers:
loop.create_task(create_peer(receptor, loop, *peer.split(":", 1)))
if ping_interval:
ping_time = (((int(loop.time()) + 1) // ping_interval) + 1) * ping_interval
loop.call_at(ping_time, loop.create_task, send_pings_and_reschedule(receptor, loop, ping_time, ping_interval))
if not skip_run:
try:
loop.run_until_complete(receptor.shutdown_handler())
except KeyboardInterrupt:
pass
finally:
loop.stop()
async def send_pings_and_reschedule(receptor, loop, ping_time, ping_interval):
logger.debug(f'Scheduling mesh ping.')
for node_id in receptor.router.get_nodes():
await receptor.router.ping_node(node_id)
loop.call_at(ping_time + ping_interval,
loop.create_task, send_pings_and_reschedule(
receptor, loop, ping_time + ping_interval, ping_interval))
``` |
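The `ping_time` expression in `mainloop` computes the first multiple of `ping_interval` strictly after `int(loop.time()) + 1`, so pings are scheduled on interval-aligned boundaries rather than a fixed delay after startup. A worked example of just that arithmetic:
```python
def next_ping_time(now, ping_interval):
    # same rounding as in mainloop(): first multiple of ping_interval after int(now) + 1
    return (((int(now) + 1) // ping_interval) + 1) * ping_interval

print(next_ping_time(1003.7, 10))   # 1010
print(next_ping_time(1009.2, 10))   # 1020 (1010 is skipped because int(now) + 1 == 1010)
```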
{
"source": "JimiJenneskens/sqlalchemy-challenge",
"score": 3
} |
#### File: JimiJenneskens/sqlalchemy-challenge/climate_app.py
```python
from flask import Flask, json, jsonify
import datetime as dt
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy import inspect
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
app = Flask(__name__)
# List all routes that are available.
@app.route("/")
def welcome():
session = Session(engine)
return (
f"Welcome to the Homepage<br/>"
f"<br/>"
f"Available routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start_date<br/>"
f"/api/v1.0/start_date/end_date/"
)
# Return the JSON representation of your dictionary
@app.route('/api/v1.0/precipitation/')
def precipitation():
session = Session(engine)
last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date
last_date = dt.datetime.strptime(last_date, "%Y-%m-%d")
first_date = last_date - dt.timedelta(days=365)
last_year_data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= first_date).all()
return jsonify(last_year_data)
# Return a JSON-list of stations from the dataset.
@app.route('/api/v1.0/stations/')
def stations():
session = Session(engine)
stations = session.query(Station.station).all()
return jsonify(stations)
# Return a JSON-list of Temperature Observations from the dataset.
@app.route('/api/v1.0/tobs/')
def tobs():
session = Session(engine)
stations = session.query(Measurement.station,func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
most_active_station = stations[0][0]
station_data = session.query(Measurement.date, Measurement.tobs).filter(Measurement.station == most_active_station).all()
station_data = list(np.ravel(station_data))
return jsonify(station_data)
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start date
@app.route('/api/v1.0/<start_date>/')
def calc_temps_start(start_date):
session = Session(engine)
results = session.query(func.min(Measurement.tobs),func.avg(Measurement.tobs),func.max(Measurement.tobs)).filter(Measurement.date > start_date).all()
temps = list(np.ravel(results))
return jsonify(temps)
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start-end range.
@app.route('/api/v1.0/<start_date>/<end_date>/')
def calc_temps_start_end(start_date, end_date):
session = Session(engine)
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
temps = list(np.ravel(results))
return jsonify(temps)
if __name__ == "__main__":
app.run(debug=True)
``` |
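One way to exercise the routes once the app is running, using the `requests` package; it assumes the Flask dev server is on its default `http://127.0.0.1:5000`, that `Resources/hawaii.sqlite` is in place, and the dates are illustrative:
```python
import requests

base = "http://127.0.0.1:5000"
print(requests.get(f"{base}/api/v1.0/stations/").json())
print(requests.get(f"{base}/api/v1.0/2016-08-23/").json())              # min/avg/max after a start date
print(requests.get(f"{base}/api/v1.0/2016-08-23/2017-08-23/").json())   # min/avg/max within a range
```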
{
"source": "JimiLab/RecSys2018",
"score": 2
} |
#### File: JimiLab/RecSys2018/DataManager.py
```python
import os
import math
import json
import time
from tqdm import tqdm
import random
from collections import defaultdict
import numpy as np
from sklearn.externals import joblib
from scipy.sparse import lil_matrix, csr_matrix
import re
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
class DataManager:
"""
    Sets up an empty DataManager object.
    The main data structure is three lists of playlists (train, test, challenge),
    where each playlist is a list of internal track IDs and the mapping between
    track IDs and URIs is kept in the dictionaries uri_to_id and id_to_uri.
"""
def __init__(self, path, track_prior, train_size=10000, test_size=2000, challenge_file=None, min_track_prior=0.0):
self.DATA_DIR = path
self.CHALLENGE_FILE = challenge_file
self.track_prior = track_prior
self.min_track_prior = min_track_prior
self.data_cache = dict()
self.train_size = train_size
self.train = []
self.train_title = []
self.train_description = []
self.word_index_playlist = defaultdict(dict) # token -> playlist -> score
self.word_index_track = defaultdict(dict) # token -> track _ ->score
self.test_size = math.ceil(test_size/10.0)*10 # needs to be a multiple of 10
self.subtest_size = self.test_size /10
self.test = [] # all test lists are of size 10 for each of the 10 subchallanges
self.test_uri = []
self.test_truth = []
self.test_truth_uri = []
self.test_title = []
for i in range(10):
self.test.append([])
self.test_uri.append([])
self.test_truth.append([])
self.test_truth_uri.append([])
self.test_title.append([])
self.subtest_name = ["title only", "title / first",
"title / first 5", "first 5",
"title / first 10", "first 10 ",
"title / first 25", "title / random 25 ",
"title / first 100", "title / random 100" ]
self.subtest_setup = [(True, 0, True), (True, 1, True),
(True, 5, True), (False, 5, True),
(True, 10, True), (False, 10, True),
(True, 25, True), (True, 25, False),
(True, 100, True), (True, 100, False)] # (has_title, num_tracks, first_or_random)
self.challenge = []
self.challenge_title = []
self.uri_to_id = dict()
self.id_to_uri = dict()
self.track_frequency = []
self.track_timestamps = [] #list of modified timestamps for playlists in which the track appears
self.artist_to_track_id = defaultdict(list)
self.album_to_track_id = defaultdict(list)
self.pid_to_spotify_pid = []
self.X = None
self.X_test = None
self.X_test_words = None
self.X_challenge = None
self.X_challenge_words = None
        self.popularity_vec = None  # prior probability of a track occurring on a playlist
self.prefix = "spotify:track:"
self.stemmer = PorterStemmer()
self.stop_words = set(stopwords.words('english'))
def text_process(self, str):
str = self.normalize_name(str)
tokens = word_tokenize(str)
stemmed_tokens = list()
for word in tokens:
if word not in self.stop_words:
stemmed_tokens.append(self.stemmer.stem(word))
return stemmed_tokens
def normalize_name(self, name):
name = name.lower()
name = re.sub(r"[.,\/#!$%\^\*;:{}=\_`~()@]", ' ', name)
name = re.sub(r'\s+', ' ', name).strip()
return name
def add_tokens_to_index(self, index, id, title, description):
str_lists =[[self.normalize_name(title)], self.text_process(title), self.text_process(description)]
weights = [1.0, 0.5, 0.25]
for i in range(len(str_lists)):
for t in str_lists[i]:
if t in index.keys():
if id in index[t]:
index[t][id] += weights[i]
else:
index[t][id] = weights[i]
else:
index[t] = {id : weights[i]}
def tfidf_index(self, index, num_docs, mode="ltc"):
print("Word Index Mode", mode)
#num_docs = len(index)
for term in index.keys():
idf = 1
if (mode[1] == 't'):
idf = math.log10(num_docs / len(index[term].keys()))
for id in index[term]:
tf = index[term][id]
if mode[0] == 'l':
tf = 1+ math.log10(tf)
index[term][id] = tf * idf
if tf*idf < 0:
pass
#length normalization - 2-pass algorithm - sum of squares
if mode[2] == 'c':
doc_len = defaultdict(float)
for term in index.keys():
for id in index[term].keys():
doc_len[id] += index[term][id] ** 2
for term in index.keys():
for id in index[term].keys():
index[term][id] /= math.sqrt(doc_len[id])
# check to make sure that each playlist is length 1
#check_doc_len = defaultdict(float)
#for term in self.word_index_playlist.keys():
# for pid in self.word_index_playlist[term].keys():
# check_doc_len[pid] += self.word_index_playlist[term][pid] ** 2
#pass
def _add_train_playlist(self, playlist):
pid = len(self.train)
self.train.append([])
title = playlist["name"]
self.train_title.append(title)
description = ""
if "description" in playlist:
description = playlist["description"]
self.train_description.append(description)
self.add_tokens_to_index(self.word_index_playlist, pid, title, description)
modified = playlist["modified_at"]
for track in playlist['tracks']:
track_uri = track['track_uri']
track_uri = track_uri[len(self.prefix):]
if self.track_prior[track_uri] < self.min_track_prior:
continue
# new track that has never been encountered before
if track_uri not in self.uri_to_id.keys():
tid = len(self.id_to_uri)
self.uri_to_id[track_uri] = tid
self.id_to_uri[tid] = [track['track_uri'], track['track_name'],
track['artist_uri'], track['artist_name'],
track['album_uri'], track['album_name']]
self.track_frequency.append(0)
self.track_timestamps.append(list())
self.artist_to_track_id[track['artist_uri']].append(tid)
self.album_to_track_id[track['album_uri']].append(tid)
track_id = self.uri_to_id[track_uri]
self.train[pid].append(track_id)
self.track_frequency[track_id] += 1
self.track_timestamps[track_id].append(modified)
self.add_tokens_to_index(self.word_index_track, track_id, title, description)
def _add_test_playlist(self, playlist):
subtest = random.randint(0,9)
# if subtest is already full
if len(self.test_uri[subtest]) >= self.subtest_size:
return
num_tracks = playlist["num_tracks"]
        # not enough tracks to hide any tracks
        # (the minimum number of held-out tracks in the challenge data set is 5)
if num_tracks - 5 <= self.subtest_setup[subtest][1]:
return
pid = len(self.test[subtest])
self.test_title[subtest].append(self.normalize_name(playlist["name"]))
uri_list = list()
for track in playlist['tracks']:
track_uri = track['track_uri']
track_uri = track_uri[len(self.prefix):]
uri_list.append(track_uri)
#random tracks from playlist
if self.subtest_setup[subtest][2] == False:
random.shuffle(uri_list)
# number of tracks in the playlist
split = self.subtest_setup[subtest][1]
self.test_uri[subtest].append(uri_list[0:split])
self.test_truth_uri[subtest].append(uri_list[split:])
pass
def load_playlist_data(self, mode='ltc'):
"""
Loads MPD JSON data files sequentially.
Create train and test list of lists where each track is
represented by internal id
if track does not appear in training set, it is represented with
an id = -1 in the test set playlist list
Args:
None
Returns:
None
"""
total_size = self.train_size+self.test_size
train_test_ratio = self.test_size / total_size
num_files_to_load = 1000
# num_files_to_load = math.ceil(total_size / 1000)+1
train_done = False
test_done = False
pbar = tqdm(total=self.train_size)
pbar.write('~~~~~~~ LOADING PLAYLIST DATA ~~~~~~~')
for file in os.listdir(self.DATA_DIR)[:num_files_to_load]:
if train_done and test_done:
break
if not file.startswith("mpd.slice"):
continue
data = json.load(open(self.DATA_DIR + file))
for playlist in data['playlists']:
# break if we have enough data
if train_done and test_done:
break
is_train = random.uniform(0, 1) > train_test_ratio
# POTENTIAL DATA LEAKER - Once training is full, everything else can be a test playlist
# skip playlist if we have already loaded enough of them for either train or test
if is_train and train_done:
is_train = False
if not is_train and test_done:
continue
if is_train:
self._add_train_playlist(playlist)
train_done = len(self.train) >= self.train_size
if train_done:
pass
pbar.update(1)
else:
self._add_test_playlist(playlist)
test_done = True
for i in range(10):
if len(self.test_uri[i]) < self.subtest_size:
test_done = False
break
pbar.close()
# TODO: need to explore variants of TF-IDF
self.tfidf_index(self.word_index_playlist, len(self.train), mode=mode)
self.tfidf_index(self.word_index_track, len(self.id_to_uri), mode=mode)
# resolve test playlist against training track corpus
# set unknown tracks to have id < 0 (e.g., -1, -2, -3, ...
for s in range(10):
miss_idx = -1
for p in range(len(self.test_uri[s])):
self.test[s].append([])
self.test_truth[s].append([])
for uri in self.test_uri[s][p]:
if uri not in self.uri_to_id.keys():
self.test[s][p].append(-1)
else:
self.test[s][p].append(self.uri_to_id[uri])
for uri in self.test_truth_uri[s][p]:
if uri not in self.uri_to_id.keys():
self.test_truth[s][p].append(miss_idx)
miss_idx -= 1
else:
self.test_truth[s][p].append(self.uri_to_id[uri])
return
def load_challenge_data(self):
data = json.load(open(self.CHALLENGE_FILE))
pbar = tqdm(total=10000)
pbar.write('~~~~~~~ LOADING PLAYLIST DATA ~~~~~~~')
for playlist in data['playlists']:
self.pid_to_spotify_pid.append(playlist['pid'])
if 'name' in playlist:
self.challenge_title.append(self.normalize_name(playlist['name']))
else:
self.challenge_title.append("")
track_ids = list()
for track in playlist['tracks']:
track_uri = track['track_uri']
track_uri = track_uri[len(self.prefix):]
if track_uri not in self.uri_to_id.keys():
track_ids.append(-1)
else:
track_ids.append(self.uri_to_id[track_uri])
self.challenge.append(track_ids)
pbar.update(1)
self.challenge_size = len(self.challenge)
pbar.close()
def pickle_data(self, filename):
# Use file handle to ensure file exists upon serialization
with open(filename, 'wb') as file:
joblib.dump(self, file)
def create_train_matrix(self):
print(" - train matrix")
num_rows = len(self.train)
num_cols = len(self.id_to_uri)
self.X = lil_matrix((num_rows, num_cols), dtype=np.int8)
for p in range(num_rows):
if p % 10000 == 0:
print(p, " of ", num_rows)
for t in self.train[p]:
self.X[p, t] = 1
self.X = self.X.tocsr()
def create_test_top_track_matrix(self):
print(" - test top tracks from artist and album matrix")
num_subtest = len(self.test)
num_rows = len(self.test[0])
num_cols = len(self.id_to_uri)
self.X_test_top_tracks = list()
        for s in range(0, num_subtest):
mat = lil_matrix((num_rows, num_cols), dtype=np.int8)
for p in range(num_rows):
for track_id in self.test[s][p]:
if track_id >= 0:
artist_uri = self.id_to_uri[track_id][2]
for top_track_id in self.artist_top_tracks[artist_uri]:
if track_id != top_track_id:
mat[p, top_track_id] = 1
album_uri = self.id_to_uri[track_id][4]
for top_track_id in self.album_top_tracks[album_uri]:
if track_id != top_track_id:
mat[p, top_track_id] = 1
self.X_test_top_tracks.append(mat.tocsc())
def create_challenge_top_track_matrix(self):
print(" - challenge top tracks from artist and album matrix")
num_rows = len(self.challenge)
num_cols = len(self.id_to_uri)
mat = lil_matrix((num_rows, num_cols), dtype=np.int8)
for p in range(num_rows):
for track_id in self.challenge[p]:
if track_id >= 0:
artist_uri = self.id_to_uri[track_id][2]
for top_track_id in self.artist_top_tracks[artist_uri]:
if track_id != top_track_id:
mat[p, top_track_id] = 1
album_uri = self.id_to_uri[track_id][4]
for top_track_id in self.album_top_tracks[album_uri]:
if track_id != top_track_id:
mat[p, top_track_id] = 1
self.X_challenge_top_tracks= mat.tocsc()
def create_test_matrix(self):
print(" - test matrix")
num_subtest = len(self.test)
num_rows = len(self.test[0])
num_cols = len(self.id_to_uri)
self.X_test = list()
for s in range(num_subtest):
mat = lil_matrix((num_rows, num_cols), dtype=np.int8)
for p in range(num_rows):
for t in self.test[s][p]:
if t >= 0 :
mat[p,t] = 1
self.X_test.append(mat)
return
def create_challenge_matrix(self):
print(" - challenge matrix")
num_rows = len(self.challenge)
num_cols = len(self.id_to_uri)
self.X_challenge = lil_matrix((num_rows, num_cols), dtype=np.int8)
for p in range(num_rows):
for t in self.challenge[p]:
if t >= 0:
self.X_challenge[p, t] = 1
def calculate_popularity(self, top_k = 5):
print("Calculating Track Prior Proabability, Top Artist Tracks, and Top Album Tracks ")
self.popularity_vec = np.array(self.track_frequency) / self.train_size
self.artist_top_tracks = defaultdict(list)
for k,v in self.artist_to_track_id.items():
track_pops = self.popularity_vec[v]
idx = np.argsort(1 / track_pops)[0:min(top_k, len(track_pops))].tolist() #sort artist track by popularity
for i in idx:
self.artist_top_tracks[k].append(v[i])
self.album_top_tracks = defaultdict(list)
for k, v in self.album_to_track_id.items():
track_pops = self.popularity_vec[v]
idx = np.argsort(1 / track_pops)[0:min(top_k, len(track_pops))].tolist() # sort artist track by popularity
for i in idx:
self.album_top_tracks[k].append(v[i])
def create_test_word_matrix_by_playlist_neighbors(self):
print(" - test title and description word matrix by playlist neighbors:")
num_subtest = len(self.test)
num_rows = len(self.test[0])
num_cols = len(self.id_to_uri)
self.X_test_words = list()
pbar = tqdm(total=num_subtest)
for s in range(0,num_subtest):
mat = csr_matrix((num_rows, num_cols), dtype="float32")
for p in range(num_rows):
tokens = self.text_process(self.test_title[s][p])
if len(tokens) > 1: # add complete title as search token
tokens.append(self.normalize_name(self.test_title[s][p]))
if len(tokens) == 0:
continue
query_token_score = 1/math.sqrt(len(tokens))
scores = defaultdict(float)
for token in tokens:
if token in self.word_index_playlist.keys():
for pid in self.word_index_playlist[token]:
scores[pid] += self.word_index_playlist[token][pid] * query_token_score
#average playlist vectors for all playlists with matching terms
temp_mat = self.X[list(scores.keys()), :].todense()
temp_score = np.array(list(scores.values()))
temp_vec = np.sum(np.multiply(temp_mat.T, temp_score).T, axis=0) /(1+math.log(1+len(scores)))
                # the denominator scales the output so that the maximum value is close to 1
mat[p, :] = temp_vec
self.X_test_words.append(mat)
pbar.update(1)
print("done.")
def create_test_word_matrix_by_track_index(self):
print(" - test title and description word matrix by track index:")
num_subtest = len(self.test)
num_rows = len(self.test[0])
num_cols = len(self.id_to_uri)
self.X_test_words = list()
pbar = tqdm(total=num_subtest)
for s in range(0,num_subtest):
mat = lil_matrix((num_rows, num_cols), dtype="float32")
for p in range(num_rows):
tokens = self.text_process(self.test_title[s][p])
if len(tokens) > 1: # add complete title as search token
tokens.append(self.normalize_name(self.test_title[s][p]))
if len(tokens) == 0:
continue
query_token_score = 1/math.sqrt(len(tokens))
for token in tokens:
if token in self.word_index_track.keys():
for tid in self.word_index_track[token]:
mat[p,tid] += self.word_index_track[token][tid] * query_token_score
self.X_test_words.append(mat.tocsr())
pbar.update(1)
print("done.")
def create_challenge_word_matrix_by_playlist_neighbors(self):
print(" - challenge title and description word matrix")
num_rows = len(self.challenge)
num_cols = len(self.id_to_uri)
mat = csr_matrix((num_rows, num_cols), dtype="float32")
pbar = tqdm(total=num_rows)
for p in range(num_rows):
tokens = self.text_process(self.challenge_title[p])
query_token_score = 1 / math.sqrt(max(1,len(tokens)))
scores = defaultdict(float)
for token in tokens:
if token in self.word_index_playlist.keys():
for pid in self.word_index_playlist[token]:
scores[pid] += self.word_index_playlist[token][pid] * query_token_score
# average playlist vectors for all playlists with matching terms
temp_mat = self.X[list(scores.keys()), :].todense()
temp_score = np.array(list(scores.values()))
temp_vec = np.sum(np.multiply(temp_mat.T, temp_score).T, axis=0) / (1 + math.log(1 + len(scores)))
            # the denominator scales the output so that the maximum value is close to 1
mat[p, :] = temp_vec
pbar.update(1)
pbar.close()
self.X_challenge_words = mat
def create_challenge_word_matrix_by_track_index(self):
print(" - challenge title and description word matrix by track index:")
num_rows = len(self.challenge)
num_cols = len(self.id_to_uri)
mat = lil_matrix((num_rows, num_cols), dtype="float32")
pbar = tqdm(total=num_rows)
for p in range(num_rows):
pbar.update(1)
# REMOVE LATER: don't compute word matrix for last 5 subchallenges sets
#if p > 5000:
# continue
tokens = self.text_process(self.challenge_title[p])
if len(tokens) > 1: # add complete title as search token
tokens.append(self.normalize_name(self.challenge_title[p]))
if len(tokens) == 0:
continue
query_token_score = 1/math.sqrt(len(tokens))
for token in tokens:
if token in self.word_index_track.keys():
for tid in self.word_index_track[token]:
mat[p,tid] += self.word_index_track[token][tid] * query_token_score
self.X_challenge_words = mat.tocsr()
pbar.close()
print("done.")
def create_matrices(self ):
self.create_train_matrix()
self.create_test_matrix()
#self.create_test_word_matrix_by_playlist_neighbors()
self.create_test_word_matrix_by_track_index()
self.create_test_top_track_matrix()
if self.CHALLENGE_FILE is not None:
self.create_challenge_matrix()
#self.create_challenge_word_matrix_by_playlist_neighbors()
self.create_challenge_word_matrix_by_track_index()
self.create_challenge_top_track_matrix()
# END OF CLASS
def calculate_track_priors(path, pickle_file):
prefix = "spotify:track:"
playlist_count = 0
track_prior = defaultdict(float)
for file in os.listdir(path):
print(file)
data = json.load(open(path + file))
for playlist in data['playlists']:
playlist_count += 1
for track in playlist['tracks']:
track_uri = track['track_uri']
track_uri = track_uri[len(prefix):]
track_prior[track_uri] += 1.0
for k in track_prior.keys():
track_prior[k] /= playlist_count
joblib.dump(track_prior, pickle_file)
return track_prior
def load_data(train_size=10000, test_size=2000, load_challenge=False, create_matrices=False, generate_data=False,
create_pickle_file=True, mode="ltc", min_track_prior= 0.0):
""" Fixed Path Names """
data_folder = os.path.join(os.getcwd(), 'data/mpd.v1/data/')
challenge_file = os.path.join(os.getcwd(), 'data/challenge.v1/challenge_set.json')
pickle_folder = os.path.join(os.getcwd(), 'data/pickles/')
c_str = ""
c_file = None
if load_challenge:
c_str = "_with_challenge"
c_file = challenge_file
m_str = ""
if create_matrices:
m_str = "_with_matrices"
pickle_file = pickle_folder + "MPD_" + str(math.floor(train_size/1000.0)) + "KTrain_" + \
str(math.floor(test_size / 1000.0)) + \
"KTest" + c_str + m_str + ".pickle"
pickle_exists = os.path.isfile(pickle_file)
if generate_data or not pickle_exists:
track_prior_pickle_file = pickle_folder + "track_prior.pickle"
if os.path.isfile(track_prior_pickle_file):
print("Loading Track Priors")
track_prior = joblib.load(track_prior_pickle_file)
else:
print("Calculating Track Priors")
track_prior = calculate_track_priors(data_folder, track_prior_pickle_file)
d = DataManager(data_folder, track_prior, train_size=train_size, test_size=test_size, challenge_file=c_file,
min_track_prior=min_track_prior)
print("Load Playlist Data")
d.load_playlist_data(mode=mode)
d.calculate_popularity()
if load_challenge:
print("Load Challenge Set Data")
d.load_challenge_data()
if create_matrices:
print("Calculate Numpy Matrices")
d.create_matrices()
if create_pickle_file:
print("Pickle Data into file: "+pickle_file)
d.pickle_data(pickle_file)
else:
print("Load data from Pickle File: "+pickle_file)
d = joblib.load(pickle_file)
return d
if __name__ == '__main__':
generate_data_arg = True # True - load data for given parameter settings
# False - only load data if pickle file doesn't already exist
train_size_arg = 1000 # number of playlists for training
test_size_arg = 1000 # number of playlists for testing
load_challenge_arg = False # loads challenge data when creating a submission to contest
create_matrices_arg = True # creates numpy matrices for train, test, and (possibly) challenge data
create_pickle_file_arg = True #takes time to create pickle file
text_index_mode_arg = "ntc"
min_track_prior_arg = 0.0002
data_in = load_data(train_size_arg, test_size_arg, load_challenge_arg, create_matrices_arg,
generate_data_arg, create_pickle_file_arg, text_index_mode_arg, min_track_prior_arg)
pass
```
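The `mode='ltc'` weighting in `tfidf_index` is the classic SMART scheme: logarithmic term frequency, inverse document frequency, and cosine length normalization. A toy, self-contained recomputation of those three steps on a hand-built index (independent of the class, just to make the arithmetic concrete):
```python
import math
from collections import defaultdict

# token -> {doc_id: raw term frequency}, the shape produced by add_tokens_to_index
index = {"road": {0: 2.0, 1: 1.0}, "trip": {0: 1.0, 2: 1.0}}
num_docs = 3

# 'l' and 't': weight = (1 + log10(tf)) * log10(N / df)
for term, postings in index.items():
    idf = math.log10(num_docs / len(postings))
    for doc_id, tf in postings.items():
        postings[doc_id] = (1 + math.log10(tf)) * idf

# 'c': cosine-normalize each document vector (two-pass sum of squares)
doc_len = defaultdict(float)
for postings in index.values():
    for doc_id, weight in postings.items():
        doc_len[doc_id] += weight ** 2
for postings in index.values():
    for doc_id, weight in postings.items():
        if doc_len[doc_id] > 0:   # guard against all-zero vectors
            postings[doc_id] = weight / math.sqrt(doc_len[doc_id])

print(index)
```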
#### File: JimiLab/RecSys2018/predict_with_LSA.py
```python
from sklearn.decomposition import TruncatedSVD
from predict import *
from DataManager import load_data
import math
import numpy as np
np.seterr(divide='ignore', invalid='ignore') # Z-score divide by zero is handled
from tqdm import tqdm
import os
from metrics import get_all_metrics
from scipy.sparse import lil_matrix, csc_matrix
from scipy.stats import zscore
import random
from sklearn.externals import joblib
from bayes_opt import BayesianOptimization
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
class PredictWithLSA:
def __init__(self, data, num_components=64, lsa_min_track_prior=0.0):
"""
:param data:
:param num_components:
        :param lsa_min_track_prior: minimum proportion of playlists that a track has to appear in to be used for SVD
projection. Default is 0.0, but a good baseline is 0.0002 or 0.02% of playlists. This retains about 22.5% of tracks.
"""
# Call init on super class
Predict.__init__(self)
self.d = data # DataManager Object
self.num_components = num_components
self.num_predictions = 500
self.svd = TruncatedSVD(n_components=self.num_components)
self.min_track_prior = lsa_min_track_prior
self.lsa_track_mask = np.extract(self.d.popularity_vec > self.min_track_prior,
np.arange(0,self.d.popularity_vec.shape[0]))
self.lsa_id_to_column = dict()
for i in range(len(self.lsa_track_mask)):
self.lsa_id_to_column[self.lsa_track_mask[i]] = i
def learn_model(self):
if not hasattr(self.d, 'X'):
print("Pickle File does not have pre-computed numpy X matrix. Aborting")
return
print("Learning LSA model...", end="")
self.svd.fit(self.d.X[:, self.lsa_track_mask])
print("done.")
def predict_from_matrices(self, X, pop_vec, X_top_tracks, X_words, weights, z_score=True, random_baseline=False):
#return np.random.randint(0, 10000 ,size=(X.shape[0], self.num_predictions))
embedded_test_vecs = self.svd.transform(X[:, self.lsa_track_mask])
lsa_vecs_hat_compressed = self.svd.inverse_transform(embedded_test_vecs)
if z_score:
lsa_vecs_hat_compressed = zscore(lsa_vecs_hat_compressed, axis=1, ddof=1)
np.nan_to_num(lsa_vecs_hat_compressed, copy=False)
lsa_vecs_hat = csc_matrix(X.shape, dtype="float32")
lsa_vecs_hat[:, self.lsa_track_mask] = lsa_vecs_hat_compressed
# linear combination of LSA score, popularity, and top tracks from artist and album
test_vecs_hat = weights[0] * lsa_vecs_hat + \
weights[1] * pop_vec + \
weights[2] * X_top_tracks + \
weights[3] * X_words
        # effectively remove known tracks that already appear in the test playlists by giving them a large negative weight
test_vecs_hat = test_vecs_hat - X * 99999999
test_rank = np.argsort(-1 * test_vecs_hat, axis=1)
if random_baseline: # Change to True for Random Baseline
np.random.shuffle(test_rank.T)
return test_rank[:, 0:self.num_predictions]
def predict_from_words(self, mat):
test_rank = np.argsort(-1 * mat.todense(), axis=1)
return test_rank[:, 0:self.num_predictions]
def predict_playlists(self, weights, z_score=False, random_baseline=False):
""" weights = (lsa_weight, popularity_weight, related_tracks_weight, word_weight)
weights can either be a tuple with 4 weights, or a list of 10 tuples of 4 weights each
"""
#print("\nStarting playlist prediction...")
print("Weights (LSA, Pop, Related Track, Title Words):", weights )
num_subtest = len(self.d.test)
num_playlists = len(self.d.test[0])
metric_names = ["r_prec", "ndcg", "clicks"]
num_metrics = len(metric_names)
results = np.zeros((num_subtest,num_playlists, num_metrics), dtype=float)
# create all popularity vecs so that 1st place is pop of 1.0
pop_vec = self.d.popularity_vec / np.max(self.d.popularity_vec)
#pbar = tqdm(total=num_subtest)
#pbar.write('~~~~~~~ Predicting Playlists ~~~~~~~')
for st in range(num_subtest):
if type(weights) == list:
w = weights[st]
else:
w = weights
test_rank = self.predict_from_matrices(self.d.X_test[st].tocsc(),
pop_vec,
self.d.X_test_top_tracks[st],
self.d.X_test_words[st],
w)
#test_rank = self.predict_from_words(self.d.X_test_words[st])
for pl in range(num_playlists):
rank_list = test_rank[pl,:].tolist()[0]
result = get_all_metrics(self.d.test_truth[st][pl], rank_list, self.num_predictions)
results[st][pl] = np.array(result)
# ignores test set songs not found in training set
#pbar.update(1)
#pbar.close()
average_result = np.mean(results, axis=1)
print("Number Training Playlists and Tracks:", self.d.X.shape)
print("Min Track Prior ", self.d.min_track_prior)
print("LSA dims: ", self.num_components)
print("LSA Track Corpus Size:", self.lsa_track_mask.size, "(LSA min track prior =", self.min_track_prior,")")
print()
self.print_subtest_results(self.d.subtest_name, metric_names, average_result)
print()
self.print_overall_results(metric_names, np.mean(average_result, axis=0))
return average_result
def predict_playlists_bayes(self, st, w0, w1, w2, w3):
        st = int(st)  # replace this later
num_playlists = len(self.d.test[0])
metric_names = ["r_prec", "ndcg", "clicks"]
num_metrics = len(metric_names)
results = np.zeros((num_playlists, num_metrics), dtype=float)
# create all popularity vecs so that 1st place is pop of 1.0
pop_vec = self.d.popularity_vec / np.max(self.d.popularity_vec)
w = (w0, w1, w2, w3)
test_rank = self.predict_from_matrices(self.d.X_test[st].tocsc(),
pop_vec,
self.d.X_test_top_tracks[st],
self.d.X_test_words[st],
w)
for pl in range(num_playlists):
rank_list = test_rank[pl, :].tolist()[0]
result = get_all_metrics(self.d.test_truth[st][pl], rank_list, self.num_predictions)
results[pl] = np.array(result)
average_result = np.mean(results, axis=0)
return average_result[1]
def generate_submission(self, filepath, weights, z_score=False):
print("Encoding and Recoding Challenge Set Matrix")
f = open(filepath, 'w')
f.write("team_info,main,JimiLab,<EMAIL>\n")
num_subtest = 10
num_playlists = len(self.d.challenge) # 10000
subtest_size = int(len(self.d.challenge) / num_subtest) # 1000
#rank = np.zeros(num_playlists, self.num_predictions)
# create all popularity vecs so that 1st place is pop of 1.0
pop_vec = self.d.popularity_vec / np.max(self.d.popularity_vec)
pbar = tqdm(total=num_subtest)
pbar.write('~~~~~~~ Generating Ranks by Subchallenge ~~~~~~~')
for i in range(num_subtest):
start = i*subtest_size
end = start+subtest_size
if type(weights) == list:
w = weights[i]
else:
w = weights
rank = self.predict_from_matrices(self.d.X_challenge[start:end, :].tocsc(),
pop_vec,
self.d.X_challenge_top_tracks[start:end, :],
self.d.X_challenge_words[start:end, :],
w)
(num_rows, num_columns) = rank.shape
for pid in range(num_rows):
spotify_pid = self.d.pid_to_spotify_pid[start+pid]
f.write(str(spotify_pid))
for tid in range(num_columns):
track_id = rank[pid, tid]
f.write("," + str(self.d.id_to_uri[track_id][0]))
f.write("\n")
pbar.update(1)
pbar.close()
f.close()
if __name__ == '__main__':
""" Parameters for Loading Data """
generate_data_arg = True # True - load data for given parameter settings
# False - only load data if pickle file doesn't already exist
create_pickle_file_arg = True #create a pickle file
train_size_arg = 500000 # number of playlists for training
test_size_arg = 5000 # number of playlists for testing
load_challenge_arg = True # loads challenge data when creating a submission to contest
    create_matrices_arg = True # creates numpy matrices for train, test, and (possibly) challenge data (should always be True)
random_baseline_arg = False # set to true if you want to run random baseline
min_track_prior_arg = 0.0001
text_index_text_mode_arg = "ntn"
num_components_arg = 128
    lsa_min_track_prior_arg = 0.0002  # minimum prior probability needed to keep a track in the LSA training matrix (default 0.0002, i.e. 2 / 10000 playlists)
lsa_zscore_arg = True # zscore the output of the LSA weight after embedding and projecting back into the original space
lsa_weight_arg = .4 # weight of LSA in linear combination
popularity_weight_arg = 0.0001 # set to 0 for no popularity bias, set to 1 for popularity baseline
related_track_weight_arg = .4 # weight for top tracks from albums and artists already in the playlist
words_weight_arg = .2
weights = (lsa_weight_arg, popularity_weight_arg, related_track_weight_arg, words_weight_arg)
a = [(0.4, 0.3, 0.1, 0.2), #100 per subtest
(0.3, 0.1, 0.2, 0.4),
(0.5, 0.0, 0.3, 0.2),
(0.4, 0.0, 0.4, 0.2),
(0.4, 0.0, 0.4, 0.2),
(0.5, 0.0, 0.3, 0.2),
(0.5, 0.0, 0.3, 0.2),
(0.4, 0.0, 0.6, 0.0),
(0.8, 0.0, 0.0, 0.2),
(0.4, 0.0, 0.6, 0.0)]
b = [(0.6, 0.15, 0.2, 0.05), # 400 per subtest
(0.25, 0.0001, 0.6, 0.15),
(0.8, 0.0001, 0.01, 0.16),
(0.25, 0.0001, 0.6, 0.15),
(0.7, 0.2, 0.01, 0.09),
(0.25, 0.0001, 0.6, 0.15),
(0.8, 0.0001, 0.01, 0.19),
(0.6, 0.15, 0.2, 0.05),
(0.6, 0.15, 0.2, 0.05),
(0.95, 0.0, 0.01, 0.04)]
c = [(1, 1, 0.47, 0.35), #optimized
(0.5, 0.2, 0.88, 0.37),
(1, 1, 1, 0.25),
(1, 0, 0.61, 0.52),
(0.14, 0.0025, 0.7121, 0.058),
(0.63, 0.1, 0.8, 0.15),
(0.79, 0.41, 0.54, 0.13),
(0.49, 0, 1, 0),
(0.63, 0.99, 0.99, 0.04),
(0.61, 0, 1, 0)]
dd = [(0.35, 0.35, 0.17, 0.12), #optimized and normalized
(0.26, 0.10, 0.45, 0.19),
(0.31, 0.31, 0.31, 0.08),
(0.47, 0.00, 0.29, 0.24),
(0.15, 0.00, 0.78, 0.06),
(0.38, 0.06, 0.48, 0.09),
(0.42, 0.22, 0.29, 0.07),
(0.33, 0.00, 0.67, 0.00),
(0.24, 0.37, 0.37, 0.02),
(0.38, 0.00, 0.62, 0.00)]
e = [(0.0, 0.3, 0.1, 0.2), # doug guess
(0.3, 0.1, 0.2, 0.4),
(0.5, 0.0, 0.3, 0.2),
(0.5, 0.0, 0.5, 0.0),
(0.4, 0.0, 0.4, 0.2),
(0.5, 0.0, 0.5, 0.0),
(0.5, 0.0, 0.3, 0.1),
(0.5, 0.0, 0.4, 0.1),
(0.5, 0.0, 0.4, 0.1),
(0.5, 0.0, 0.4, 0.1)]
    f = [(0.0, 0.5, 0.3, 0.2), # doug guess simplified
(0.3, 0.1, 0.2, 0.4),
(0.5, 0.0, 0.3, 0.2),
(0.5, 0.0, 0.5, 0.0),
(0.4, 0.0, 0.4, 0.2),
(0.5, 0.0, 0.5, 0.0),
(0.6, 0.0, 0.4, 0.0),
(0.6, 0.0, 0.4, 0.0),
(0.6, 0.0, 0.4, 0.0),
(0.6, 0.0, 0.4, 0.0)]
fiveK_weights = [(0.48270270281836813, 0.7448876242714548, 0.8873458428769633, 0.15564998404090447),
(0.6665980154381933, 0.9053823615161176, 0.4117130073449573, 0.2148710518378656),
(0.8827692081275599, 0.5576141929834891, 0.49192775259341104, 0.2999736449122169),
(0.8800370593956184, 0.7937380143368223, 0.8841046630093821, 0.34700353058398903),
(0.5274603443643752, 0.07455477305947611, 0.1880354271110969, 0.03071420816074444),
(0.6307804397623651, 0.27749035743731953, 0.7761038220705893, 0.06690470605221444),
(0.9193785447942945, 0.6314566605491208, 0.716798086280039, 0.13545127867094608),
(0.5828181810021488, 0.970491938366122, 0.7521723287576919, 0.02099917789974426),
(0.6775291332800575, 0.5180995363786292, 0.7337840488893119, 0.029505250640784464),
(0.9999999982720098, 2.158247870415833e-09, 1.0, 0.0)]
super_weights = [dd]
submission_file_arg = os.path.join(os.getcwd(), 'data/submissions/lsa_test_June28_600K.csv')
print("Starting Program")
d = load_data(train_size_arg, test_size_arg, load_challenge_arg, create_matrices_arg,
generate_data_arg, create_pickle_file_arg, text_index_text_mode_arg, min_track_prior_arg)
lsa = PredictWithLSA(d, num_components=num_components_arg, lsa_min_track_prior=lsa_min_track_prior_arg)
lsa.learn_model()
if False:
weight_arr = [(.25, .25, .25, .25),
(1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0,0,0,1),
(.7, .1, .1, .1), (.1, .7, .1, .1),(.1, .1, .7, .1),(.1, .1, .1, .7),
(.4, .1, .1, .4), (.4, .1, .4, .1), (.4, .4, .1, .1), (.1, .4, .4, .1),
(.1, .4, .1, .4), (.1, .1, .1, .4),
(.3, .3, .3, .1), (.3, .3, .1, .3), (.3, .1, .3, .3), (.1, .3, .3, .3)]
for weights in weight_arr:
lsa.predict_playlists(weights, z_score=lsa_zscore_arg)
if False:
filename = "data/pickles/test_results50.pickle"
num_trials = 25
weight_options = [.0, .0001, .001, .01, .05, 0.1, 0.15, .2, .25, .3, .4, .5, .6, .7, .75, .8, .85, .9, .95, .99, .999, .9999, 1.0]
weight_list = []
results = np.zeros((10, 3, num_trials), dtype=float)
best_weights = list()
for i in range(num_trials):
lsa_w = random.choice(weight_options)
while (True):
pop_w = random.choice(weight_options)
if pop_w + lsa_w <= 1.0:
break
while (True):
rt_w = random.choice(weight_options)
if pop_w + lsa_w + rt_w <= 1.0:
break
weights = (max(0.0, lsa_w), max(0.0, pop_w), max(0.0, rt_w), max(0.0, 1-lsa_w-pop_w-rt_w) )
print("\nTrial: ", i)
results[:, :, i] = lsa.predict_playlists(weights, z_score=lsa_zscore_arg)
weight_list.append(weights)
joblib.dump([weight_list, results], filename)
ncdg = results[:, 1, :]
top_score = -1 * np.sort(-1 * ncdg, axis=1)[:, 0]
top_idx = np.argsort(-1 * ncdg, axis=1)[:, 0]
for i in range(top_idx.shape[0]):
print(i, top_idx[i], top_score[i], weight_list[top_idx[i]])
# print(weight_list[top_idx[i]])
best_weights.append(weight_list[top_idx[i]])
print(np.mean(top_score))
print("Results with Best Weights:")
lsa.predict_playlists(best_weights, z_score=lsa_zscore_arg, random_baseline=random_baseline_arg)
print(best_weights)
if True: #Bayesian Optimation:
best_weights = list()
#ncdg = lsa.predict_playlists_bayes(0, .33, .33, .33)
for st in range(10):
bo = BayesianOptimization(lsa.predict_playlists_bayes, {'st':(st,st),
'w0': (0,1), 'w1':(0,1),
'w2':(0,1), 'w3':(0,1)})
bo.maximize(init_points=20, n_iter=5, acq='ucb', kappa=5)
print(bo.res['max'])
d = bo.res['max']
p = d['max_params']
best_weights.append((p['w0'], p['w1'], p['w2'], p['w3']))
lsa.predict_playlists(best_weights, z_score=lsa_zscore_arg, random_baseline=random_baseline_arg)
print(best_weights)
if False:
for sub_weights in super_weights:
print(sub_weights)
lsa.predict_playlists(sub_weights, z_score=lsa_zscore_arg, random_baseline=random_baseline_arg)
if load_challenge_arg:
#best_weights = [(0.48270270281836813, 0.7448876242714548, 0.8873458428769633, 0.15564998404090447), (0.6665980154381933, 0.9053823615161176, 0.4117130073449573, 0.2148710518378656), (0.8827692081275599, 0.5576141929834891, 0.49192775259341104, 0.2999736449122169), (0.8800370593956184, 0.7937380143368223, 0.8841046630093821, 0.34700353058398903), (0.5274603443643752, 0.07455477305947611, 0.1880354271110969, 0.03071420816074444), (0.6307804397623651, 0.27749035743731953, 0.7761038220705893, 0.06690470605221444), (0.9193785447942945, 0.6314566605491208, 0.716798086280039, 0.13545127867094608), (0.5828181810021488, 0.970491938366122, 0.7521723287576919, 0.02099917789974426), (0.6775291332800575, 0.5180995363786292, 0.7337840488893119, 0.029505250640784464), (0.9999999982720098, 2.158247870415833e-09, 1.0, 0.0)]
#lsa.predict_playlists(best_weights, z_score=lsa_zscore_arg, random_baseline=random_baseline_arg)
print("Generating Submission file:", submission_file_arg)
lsa.generate_submission(submission_file_arg, best_weights, z_score=lsa_zscore_arg)
print("done")
``` |
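Condensed from the `__main__` block above, the typical flow is: load data, fit the truncated SVD, then rank tracks from a weighted blend of LSA, popularity, related-track, and title-word scores. The weight values below mirror the defaults in the file; the module import names are assumptions based on the file names.

```python
from DataManager import load_data
from predict_with_LSA import PredictWithLSA  # module name assumed from the file name above

d = load_data(10000, 2000, False, True, True, True, "ntn", 0.0001)  # smaller sizes than the 500K run above
lsa = PredictWithLSA(d, num_components=128, lsa_min_track_prior=0.0002)
lsa.learn_model()
# (LSA, popularity, related-track, title-word) weights; values taken from the defaults above
lsa.predict_playlists((0.4, 0.0001, 0.4, 0.2), z_score=True)
```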
{
"source": "jimilee/image-classification",
"score": 2
} |
#### File: image-classification/models/conformer.py
```python
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from .layers import MLP, DropPath, trunc_normal_
class Attention(nn.Module):
def __init__(self, dim, head):
super().__init__()
self.head = head
self.scale = (dim // head) ** -0.5
self.qkv = nn.Linear(dim, dim*3)
self.proj = nn.Linear(dim, dim)
def forward(self, x: Tensor) -> Tensor:
B, N, C = x.shape
q, k, v = self.qkv(x).reshape(B, N, 3, self.head, C // self.head).permute(2, 0, 3, 1, 4)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
return x
class Block(nn.Module):
def __init__(self, dim, head, dpr=0.):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = Attention(dim, head)
self.drop_path = DropPath(dpr) if dpr > 0. else nn.Identity()
self.norm2 = nn.LayerNorm(dim)
self.mlp = MLP(dim, int(dim*4))
def forward(self, x: Tensor) -> Tensor:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConvBlock(nn.Module):
def __init__(self, c1, c2, s=1, res_conv=False):
super().__init__()
ch = c2 // 4
self.res_conv = res_conv
self.conv1 = nn.Conv2d(c1, ch, 1, 1, 0, bias=False)
self.bn1 = nn.BatchNorm2d(ch)
self.conv2 = nn.Conv2d(ch, ch, 3, s, 1, bias=False)
self.bn2 = nn.BatchNorm2d(ch)
self.conv3 = nn.Conv2d(ch, c2, 1, 1, 0, bias=False)
self.bn3 = nn.BatchNorm2d(c2)
self.act = nn.ReLU()
if self.res_conv:
self.residual_conv = nn.Conv2d(c1, c2, 1, s, 0, bias=False)
self.residual_bn = nn.BatchNorm2d(c2)
def zero_init_last_bn(self):
nn.init.zeros_(self.bn3.weight)
def forward(self, x: Tensor, x_t: Tensor = None, return_x2=True):
residual = x
x = self.act(self.bn1(self.conv1(x)))
x = self.conv2(x) if x_t is None else self.conv2(x+x_t)
x2 = self.act(self.bn2(x))
x = self.bn3(self.conv3(x2))
if self.res_conv:
residual = self.residual_bn(self.residual_conv(residual))
x += residual
x = self.act(x)
if return_x2:
return x, x2
return x
class FCUDown(nn.Module):
def __init__(self, c1, c2, dw_stride):
super().__init__()
self.conv_project = nn.Conv2d(c1, c2, 1, 1, 0)
self.sample_pooling = nn.AvgPool2d(dw_stride, dw_stride)
self.ln = nn.LayerNorm(c2)
self.act = nn.GELU()
def forward(self, x, x_t):
x = self.conv_project(x)
x = self.sample_pooling(x).flatten(2).transpose(1, 2)
x = self.ln(x)
x = self.act(x)
x = torch.cat([x_t[:, 0][:, None, :], x], dim=1)
return x
class FCUUp(nn.Module):
def __init__(self, c1, c2, up_stride):
super().__init__()
self.up_stride = up_stride
self.conv_project = nn.Conv2d(c1, c2, 1, 1, 0)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.ReLU()
def forward(self, x: Tensor, H, W) -> Tensor:
B, _, C = x.shape
x = x[:, 1:].transpose(1, 2).reshape(B, C, H, W)
x = self.act(self.bn(self.conv_project(x)))
x = F.interpolate(x, size=(H*self.up_stride, W*self.up_stride))
return x
class ConvTransBlock(nn.Module):
def __init__(self, c1, c2, res_conv, stride, dw_stride, embed_dim, head=12, dpr=0., last_fusion=False):
super().__init__()
expansion = 4
self.dw_stride = dw_stride
self.cnn_block = ConvBlock(c1, c2, stride, res_conv)
if last_fusion:
self.fusion_block = ConvBlock(c2, c2, 2, True)
else:
self.fusion_block = ConvBlock(c2, c2)
self.squeeze_block = FCUDown(c2//expansion, embed_dim, dw_stride)
self.expand_block = FCUUp(embed_dim, c2//expansion, dw_stride)
self.trans_block = Block(embed_dim, head, dpr)
def forward(self, x, x_t):
x, x2 = self.cnn_block(x)
_, _, H, W = x2.shape
x_st = self.squeeze_block(x2, x_t)
x_t = self.trans_block(x_st+x_t)
x_t_r = self.expand_block(x_t, H//self.dw_stride, W//self.dw_stride)
x = self.fusion_block(x, x_t_r, return_x2=False)
return x, x_t
conformer_settings = {
'T': [1, 384, 6, 0.1], # [channel_ratio, embed_dim, head, dpr]
'S': [4, 384, 6, 0.2],
'B': [6, 576, 9, 0.3]
}
class Conformer(nn.Module): # this model works with any image size, even non-square image size
def __init__(self, model_name: str = 'S', pretrained: str = None, num_classes: int = 1000, *args, **kwargs) -> None:
super().__init__()
assert model_name in conformer_settings.keys(), f"Conformer model name should be in {list(conformer_settings.keys())}"
channel_ratio, embed_dim, head, drop_path_rate = conformer_settings[model_name]
depth = 12
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# Stem
self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.act = nn.ReLU()
self.maxpool = nn.MaxPool2d(3, 2, 1)
# Stage1
stage1_channel = int(64*channel_ratio)
self.conv_1 = ConvBlock(64, stage1_channel, res_conv=True)
self.trans_patch_conv = nn.Conv2d(64, embed_dim, 4, 4, 0)
self.trans_1 = Block(embed_dim, head, dpr[0])
# Stage2-4
self.conv_trans_2 = ConvTransBlock(stage1_channel, stage1_channel, False, 1, 4, embed_dim, head, dpr[1])
self.conv_trans_3 = ConvTransBlock(stage1_channel, stage1_channel, False, 1, 4, embed_dim, head, dpr[2])
self.conv_trans_4 = ConvTransBlock(stage1_channel, stage1_channel, False, 1, 4, embed_dim, head, dpr[3])
# Stage5-8
self.conv_trans_5 = ConvTransBlock(stage1_channel, stage1_channel*2, True, 2, 2, embed_dim, head, dpr[4])
self.conv_trans_6 = ConvTransBlock(stage1_channel*2, stage1_channel*2, False, 1, 2, embed_dim, head, dpr[5])
self.conv_trans_7 = ConvTransBlock(stage1_channel*2, stage1_channel*2, False, 1, 2, embed_dim, head, dpr[6])
self.conv_trans_8 = ConvTransBlock(stage1_channel*2, stage1_channel*2, False, 1, 2, embed_dim, head, dpr[7])
# Stage9-12
self.conv_trans_9 = ConvTransBlock(stage1_channel*2, stage1_channel*4, True, 2, 1, embed_dim, head, dpr[8])
self.conv_trans_10 = ConvTransBlock(stage1_channel*4, stage1_channel*4, False, 1, 1, embed_dim, head, dpr[9])
self.conv_trans_11 = ConvTransBlock(stage1_channel*4, stage1_channel*4, False, 1, 1, embed_dim, head, dpr[10])
self.conv_trans_12 = ConvTransBlock(stage1_channel*4, stage1_channel*4, False, 1, 1, embed_dim, head, dpr[11], True)
self.depth = depth
self.trans_norm = nn.LayerNorm(embed_dim)
# self.pooling = nn.AdaptiveAvgPool2d(1)
self.conv_cls_head = nn.Linear(int(256*channel_ratio), num_classes)
self.trans_cls_head = nn.Linear(embed_dim, num_classes)
trunc_normal_(self.cls_token, std=.02)
self._init_weights(pretrained)
def _init_weights(self, pretrained: str = None) -> None:
if pretrained:
self.load_state_dict(torch.load(pretrained, map_location='cpu'))
else:
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {"cls_token"}
def forward(self, x: Tensor) -> Tensor:
B = x.shape[0]
cls_tokens = self.cls_token.expand(B, -1, -1)
# stem
x_base = self.maxpool(self.act(self.bn1(self.conv1(x))))
# stage 1
x = self.conv_1(x_base, return_x2=False)
x_t = self.trans_patch_conv(x_base).flatten(2).transpose(1, 2)
x_t = torch.cat([cls_tokens, x_t], dim=1)
x_t = self.trans_1(x_t)
# stage 2-12
for i in range(2, self.depth+1):
x, x_t = eval(f'self.conv_trans_{i}')(x, x_t)
# x_p = self.pooling(x).flatten(1)
# conv_cls = self.conv_cls_head(x_p)
x_t = self.trans_norm(x_t)
trans_cls = self.trans_cls_head(x_t[:, 0])
return trans_cls
if __name__ == '__main__':
model = Conformer('S', 'checkpoints/conformer/Conformer_small_patch16.pth')
x = torch.zeros(1, 3, 224, 224)
y = model(x)
print(y.shape)
```
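The FCUDown/FCUUp pair above is what keeps the CNN and transformer branches aligned. A standalone shape check is sketched below; the import path and the concrete sizes are assumptions, matching a 224×224 input to the 'S' variant, where stage-1 bottleneck features are 64×56×56 and the transformer carries 196 patch tokens plus one class token of width 384.

```python
import torch
from models.conformer import FCUDown, FCUUp  # path assumed from the repo layout

x_conv = torch.randn(1, 64, 56, 56)    # CNN-branch bottleneck features (ch = c2 // 4)
x_t = torch.randn(1, 197, 384)         # 1 cls token + 14*14 patch tokens
down = FCUDown(64, 384, dw_stride=4)   # conv features -> token sequence
up = FCUUp(384, 64, up_stride=4)       # token sequence -> conv features
print(down(x_conv, x_t).shape)         # torch.Size([1, 197, 384])
print(up(x_t, 14, 14).shape)           # torch.Size([1, 64, 56, 56])
```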
#### File: image-classification/models/convnext.py
```python
import torch
from torch import nn, Tensor
from .layers import DropPath
class LayerNorm(nn.Module):
"""Channel first layer norm
"""
def __init__(self, normalized_shape, eps=1e-6) -> None:
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class Block(nn.Module):
def __init__(self, dim, dpr=0., init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, 7, 1, 3, groups=dim)
self.norm = nn.LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4*dim)
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4*dim, dim)
self.gamma = nn.Parameter(init_value * torch.ones((dim)), requires_grad=True) if init_value > 0 else None
self.drop_path = DropPath(dpr) if dpr > 0. else nn.Identity()
def forward(self, x: Tensor) -> Tensor:
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # NCHW to NHWC
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2)
x = input + self.drop_path(x)
return x
class Stem(nn.Sequential):
def __init__(self, c1, c2, k, s):
super().__init__(
nn.Conv2d(c1, c2, k, s),
LayerNorm(c2)
)
class Downsample(nn.Sequential):
def __init__(self, c1, c2, k, s):
super().__init__(
LayerNorm(c1),
nn.Conv2d(c1, c2, k, s)
)
convnext_settings = {
'T': [[3, 3, 9, 3], [96, 192, 384, 768]], # [depths, dims]
'S': [[3, 3, 27, 3], [96, 192, 384, 768]],
'B': [[3, 3, 27, 3], [128, 256, 512, 1024]]
}
class ConvNeXt(nn.Module):
def __init__(self, model_name: str = 'B', pretrained: str = None, num_classes: int = 1000, *args, **kwargs) -> None:
super().__init__()
assert model_name in convnext_settings.keys(), f"ConvNeXt model name should be in {list(convnext_settings.keys())}"
depths, embed_dims = convnext_settings[model_name]
drop_path_rate = 0.
self.downsample_layers = nn.ModuleList([
Stem(3, embed_dims[0], 4, 4),
*[Downsample(embed_dims[i], embed_dims[i+1], 2, 2) for i in range(3)]
])
self.stages = nn.ModuleList()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(*[
Block(embed_dims[i], dpr[cur+j])
for j in range(depths[i])])
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(embed_dims[-1], eps=1e-6)
self.head = nn.Linear(embed_dims[-1], num_classes)
# use as a backbone
# for i in range(4):
# self.add_module(f"norm{i}", LayerNorm(embed_dims[i]))
self._init_weights(pretrained)
def _init_weights(self, pretrained: str = None) -> None:
if pretrained:
self.load_state_dict(torch.load(pretrained, map_location='cpu')['model'])
else:
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.zeros_(m.bias)
else:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
def return_features(self, x):
outs = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
norm_layer = getattr(self, f"norm{i}")
outs.append(norm_layer(x))
return outs
def forward(self, x: torch.Tensor):
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
x = self.norm(x.mean([-2, -1])) # GAP NCHW to NC
x = self.head(x)
return x
if __name__ == '__main__':
model = ConvNeXt('B', 'C:\\Users\\sithu\\Documents\\weights\\backbones\\convnext\\convnext_base_1k_224_ema.pth')
x = torch.randn(1, 3, 224, 224)
y = model(x)
print(y.shape)
```
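The custom `LayerNorm` above normalizes over the channel axis of NCHW tensors. A quick sanity sketch (the import path is an assumption) shows it matches `nn.LayerNorm` applied in NHWC layout:

```python
import torch
from torch import nn
from models.convnext import LayerNorm  # channel-first variant defined above

x = torch.randn(2, 64, 8, 8)
cf = LayerNorm(64)                     # normalizes dim 1 of NCHW directly
ref = nn.LayerNorm(64, eps=1e-6)       # standard channels-last layer norm
out_ref = ref(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
print(torch.allclose(cf(x), out_ref, atol=1e-5))  # True
```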
#### File: image-classification/models/__init__.py
```python
from pathlib import Path
from .resnet import ResNet, resnet_settings
from .cyclemlp import CycleMLP, cyclemlp_settings
from .cswin import CSWin, cswin_settings
from .gfnet import GFNet, gfnet_settings
from .pvt import PVTv2, pvtv2_settings
from .shuffle import Shuffle, shuffle_settings
from .rest import ResT, rest_settings
from .conformer import Conformer, conformer_settings
from .micronet import MicroNet, micronet_settings
from .poolformer import PoolFormer, poolformer_settings
from .patchconvnet import PatchConvnet, patchconvnet_settings
from .hiremlp import HireMLP, hiremlp_settings
from .wavemlp import WaveMLP, wavemlp_settings
from .convnext import ConvNeXt, convnext_settings
from .uniformer import UniFormer, uniformer_settings
__all__ = [
'ResNet', 'MicroNet', 'ConvNeXt',
'GFNet', 'PVTv2', 'ResT',
'Conformer', 'Shuffle', 'CSWin',
'CycleMLP', 'HireMLP', 'WaveMLP',
'PoolFormer', 'PatchConvnet', 'UniFormer',
]
def get_model(model_name: str, model_variant: str, pretrained: str = None, num_classes: int = 1000, image_size: int = 224):
assert model_name in __all__, f"Unavailable model name >> {model_name}.\nList of available model names: {__all__}"
if pretrained is not None: assert Path(pretrained).exists(), "Please set the correct pretrained model path"
return eval(model_name)(model_variant, pretrained, num_classes, image_size)
```
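The factory resolves the class by name via `eval`, so `model_name` must match an entry in `__all__` and the variant must exist in that model's settings dict. A minimal call, assuming the package is importable as `models`:

```python
from models import get_model  # package import path is an assumption

model = get_model('ConvNeXt', 'B', pretrained=None, num_classes=1000, image_size=224)
print(sum(p.numel() for p in model.parameters()) / 1e6, "M parameters")
```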
#### File: image-classification/models/layers.py
```python
import torch
import math
import warnings
from torch import nn, Tensor
class MLP(nn.Module):
def __init__(self, dim, hidden_dim, out_dim=None) -> None:
super().__init__()
out_dim = out_dim or dim
self.fc1 = nn.Linear(dim, hidden_dim)
self.act = nn.GELU()
self.fc2 = nn.Linear(hidden_dim, out_dim)
def forward(self, x: Tensor) -> Tensor:
return self.fc2(self.act(self.fc1(x)))
class PatchEmbedding(nn.Module):
"""Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, embed_dim=768):
super().__init__()
assert img_size % patch_size == 0, 'Image size must be divisible by patch size'
img_size = (img_size, img_size) if isinstance(img_size, int) else img_size
self.grid_size = (img_size[0] // patch_size, img_size[1] // patch_size)
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(3, embed_dim, patch_size, patch_size)
def forward(self, x: torch.Tensor) -> Tensor:
x = self.proj(x) # b x hidden_dim x 14 x 14
x = x.flatten(2).swapaxes(1, 2) # b x (14*14) x hidden_dim
return x
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Copied from timm
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
def __init__(self, p: float = None):
super().__init__()
self.p = p
def forward(self, x: Tensor) -> Tensor:
if self.p == 0. or not self.training:
return x
kp = 1 - self.p
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = kp + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
return x.div(kp) * random_tensor
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
```
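`DropPath` zeroes whole samples in the residual branch during training and rescales the survivors by 1/(1-p), so it is an identity in eval mode and expectation-preserving in train mode. A small check (import path is an assumption):

```python
import torch
from models.layers import DropPath  # path assumed from the repo layout

dp = DropPath(p=0.2)
x = torch.ones(1000, 16)

dp.train()
y = dp(x)
dropped = (y.sum(dim=1) == 0).float().mean().item()
print(f"~{dropped:.2f} of rows zeroed, mean ~ {y.mean().item():.3f}")  # ~0.20, ~1.0

dp.eval()
print(torch.equal(dp(x), x))  # True: identity at inference
```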
#### File: image-classification/models/patchconvnet.py
```python
import torch
from torch import nn, Tensor
from .layers import DropPath
class MLP(nn.Module):
def __init__(self, dim, hidden_dim, out_dim=None) -> None:
super().__init__()
out_dim = out_dim or dim
self.fc1 = nn.Linear(dim, hidden_dim)
self.act = nn.GELU()
self.fc2 = nn.Linear(hidden_dim, out_dim)
def forward(self, x: Tensor) -> Tensor:
return self.fc2(self.act(self.fc1(x)))
class LearnedAggreationLayer(nn.Module):
def __init__(self, dim, head=1):
super().__init__()
self.head = head
self.scale = (dim // head) ** -0.5
self.q = nn.Linear(dim, dim)
self.k = nn.Linear(dim, dim)
self.v = nn.Linear(dim, dim)
self.proj = nn.Linear(dim, dim)
def forward(self, x: Tensor) -> Tensor:
B, N, C = x.shape
q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.head, C//self.head).permute(0, 2, 1, 3)
k = self.k(x).reshape(B, N, self.head, C//self.head).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, N, self.head, C//self.head).permute(0, 2, 1, 3)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C)
x_cls = self.proj(x_cls)
return x_cls
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, rd_ratio=0.25) -> None:
super().__init__()
rd_channels = round(in_chs * rd_ratio)
self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1)
self.act1 = nn.ReLU(True)
self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1)
self.gate = nn.Sigmoid()
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.act1(self.conv_reduce(x_se))
x_se = self.conv_expand(x_se)
return x * self.gate(x_se)
class SEBlock(nn.Module):
def __init__(self, dim) -> None:
super().__init__()
self.qkv_pos = nn.Sequential(
nn.Conv2d(dim, dim, 1),
nn.GELU(),
nn.Conv2d(dim, dim, 3, 1, 1, groups=dim),
nn.GELU(),
SqueezeExcite(dim),
nn.Conv2d(dim, dim, 1)
)
def forward(self, x):
B, N, C = x.shape
H = W = int(N ** 0.5)
x = x.transpose(-1, -2)
x = x.reshape(B, C, H, W)
x = self.qkv_pos(x)
x = x.reshape(B, C, N)
x = x.transpose(-1, -2)
return x
class Block(nn.Module):
def __init__(self, dim, dpr=0., init_values=1e-6) -> None:
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = SEBlock(dim)
self.drop_path = DropPath(dpr) if dpr > 0. else nn.Identity()
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
def forward(self, x):
return x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
class BlockToken(nn.Module):
def __init__(self, dim, head, dpr=0., init_values=1e-6):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = LearnedAggreationLayer(dim, head)
self.drop_path = DropPath(dpr) if dpr > 0. else nn.Identity()
self.norm2 = nn.LayerNorm(dim)
self.mlp = MLP(dim, int(dim*3))
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
def forward(self, x: Tensor, x_cls: Tensor) -> Tensor:
u = torch.cat([x_cls, x], dim=1)
x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u)))
x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls)))
return x_cls
class Stem(nn.Module):
def __init__(self, dim):
super().__init__()
self.proj = nn.Sequential(
nn.Sequential(nn.Conv2d(3, dim//8, 3, 2, 1, bias=False)),
nn.GELU(),
nn.Sequential(nn.Conv2d(dim//8, dim//4, 3, 2, 1, bias=False)),
nn.GELU(),
nn.Sequential(nn.Conv2d(dim//4, dim//2, 3, 2, 1, bias=False)),
nn.GELU(),
nn.Sequential(nn.Conv2d(dim//2, dim, 3, 2, 1, bias=False))
)
def forward(self, x: Tensor) -> Tensor:
return self.proj(x).flatten(2).transpose(1, 2)
patchconvnet_settings = {
'S60': [384, 60, 0.], # [embed_dim, depth, drop_path_rate]
'S120': [384, 120, 0.],
'B60': [768, 60, 0.]
}
class PatchConvnet(nn.Module):
def __init__(self, model_name: str = 'S60', pretrained: str = None, num_classes: int = 1000, *args, **kwargs) -> None:
super().__init__()
assert model_name in patchconvnet_settings.keys(), f"PatchConvnet model name should be in {list(patchconvnet_settings.keys())}"
embed_dim, depth, drop_path_rate = patchconvnet_settings[model_name]
self.patch_embed = Stem(embed_dim)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
dpr = [drop_path_rate for _ in range(depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(embed_dim, dpr[i])
for i in range(depth)])
self.blocks_token_only = nn.ModuleList([
BlockToken(embed_dim, 1, 0)
for i in range(1)])
self.norm = nn.LayerNorm(embed_dim)
self.total_len = 1 + depth
self.head = nn.Linear(embed_dim, num_classes)
self._init_weights(pretrained)
def _init_weights(self, pretrained: str = None) -> None:
if pretrained:
self.load_state_dict(torch.load(pretrained, map_location='cpu'))
else:
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.zeros_(m.bias)
else:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, x: Tensor) -> Tensor:
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
for blk in self.blocks:
x = blk(x)
for blk in self.blocks_token_only:
cls_tokens = blk(x, cls_tokens)
x = torch.cat([cls_tokens, x], dim=1)
x = self.norm(x)
x = self.head(x[:, 0])
return x
if __name__ == '__main__':
model = PatchConvnet('B60', 'C:\\Users\\sithu\\Documents\\weights\\backbones\\patchconvnet\\b60_224_1k.pth')
x = torch.zeros(1, 3, 224, 224)
y = model(x)
print(y.shape)
``` |
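The readout in `BlockToken` above is class-attention: only the class token forms the query, so the output is a single pooled token regardless of sequence length. A standalone shape check (import path is an assumption):

```python
import torch
from models.patchconvnet import LearnedAggreationLayer  # path assumed from the repo layout

tokens = torch.randn(2, 1 + 196, 384)   # [cls | patch tokens]
pool = LearnedAggreationLayer(384, head=1)
print(pool(tokens).shape)               # torch.Size([2, 1, 384])
```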
{
"source": "jimilee/rcnnpose-pytorch-tracker",
"score": 2
} |
#### File: rcnnpose-pytorch-tracker/rcnnpose/estimator.py
```python
import numpy as np
import torch
import torchvision
class BodyPoseEstimator(object):
def __init__(self, pretrained=False):
self._estimator_m = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=pretrained)
self._estimator_k = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=pretrained)
if torch.cuda.is_available():
print('cuda is available.')
self._estimator_m = self._estimator_m.cuda()
self._estimator_k = self._estimator_k.cuda()
self._estimator_m.eval()
self._estimator_k.eval()
def __call__(self, image, masks=True, keypoints=True):
x = self._transform_image(image)
if torch.cuda.is_available():
x = x.cuda()
m = self._predict_masks(x) if masks else [None]
k = self._predict_keypoints(x) if keypoints else [None]
return {'estimator_m': m[0], 'estimator_k': k[0]}
def _transform_image(self, image):
return torchvision.transforms.ToTensor()(image)
def _predict_masks(self, x):
with torch.no_grad():
return self._estimator_m([x])
def _predict_keypoints(self, x):
with torch.no_grad():
return self._estimator_k([x])
@staticmethod
def get_masks(dictionary, label=1, score_threshold=0.5):
masks = []
if dictionary:
for i in (dictionary['labels'] == label).nonzero().view(-1):
if dictionary['scores'][i] > score_threshold:
mask = dictionary['masks'][i].detach().cpu().squeeze().numpy() > 0.5
masks.append(mask)
return np.asarray(masks, dtype=np.uint8)
@staticmethod
def get_keypoints(dictionary, label=1, score_threshold=0.5): #dictionary has 'boxes', 'labels', 'scores', 'keypoints', 'keypoints_scores'
keypoints = []
if dictionary:
for i in (dictionary['labels'] == label).nonzero().view(-1):
if dictionary['scores'][i] > score_threshold:
keypoint = dictionary['keypoints'][i].detach().cpu().squeeze().numpy()
keypoints.append(keypoint)
return np.asarray(keypoints, dtype=np.int32)
@staticmethod
def get_embeddings(dictionary, label=1, score_threshold=0.5): # dictionary has 'boxes', 'labels', 'scores', 'keypoints', 'keypoints_scores'
embeddings = []
if dictionary:
for i in (dictionary['labels'] == label).nonzero().view(-1):
if dictionary['scores'][i] > score_threshold:
embedding = dictionary['keypoints'][i].detach().cpu().squeeze().numpy()
embeddings.append(embedding)
        return np.asarray(embeddings, dtype=float)  # np.float alias was removed in newer NumPy
@staticmethod
def get_boxes(dictionary, label=1, score_threshold=0.5):
boxes = []
if dictionary:
# print(dictionary)
for i in (dictionary['labels'] == label).nonzero().view(-1):
if dictionary['scores'][i] > score_threshold:
box = dictionary['boxes'][i].detach().cpu().squeeze().numpy()
boxes.append(box)
return np.asarray(boxes, dtype=np.int32)
``` |
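A minimal end-to-end sketch of the estimator API above. The image path and the OpenCV loading step are assumptions; any HWC RGB array works with torchvision's `ToTensor`.

```python
import cv2
from rcnnpose.estimator import BodyPoseEstimator

estimator = BodyPoseEstimator(pretrained=True)
image = cv2.cvtColor(cv2.imread('frame.jpg'), cv2.COLOR_BGR2RGB)  # hypothetical frame

out = estimator(image, masks=True, keypoints=True)
boxes = BodyPoseEstimator.get_boxes(out['estimator_k'], score_threshold=0.8)
keypoints = BodyPoseEstimator.get_keypoints(out['estimator_k'], score_threshold=0.8)
masks = BodyPoseEstimator.get_masks(out['estimator_m'], score_threshold=0.8)
print(len(boxes), "person(s) detected")
```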
{
"source": "Jimilian/jenkinsator",
"score": 2
} |
#### File: Jimilian/jenkinsator/jenkinsator.py
```python
from __future__ import absolute_import, print_function
import sys
import netrc
import argparse
try:
import jenkins as jenkins_api
except ImportError as e:
no_module_error = "No module named "
if(e.message.startswith(no_module_error)):
module_name = e.message[len(no_module_error):]
if module_name == "jenkins":
module_name = "python-jenkins"
print(e)
print("Please, install it via: sudo python -m pip install", module_name)
sys.exit(1)
else:
raise e
class DryJenkins(object):
def get_nodes(self):
return [{"name": "dry node", "offline": True}]
def get_plugins(self, depth):
return {("DryRun Plugin", "dry run plugin"): {"version": 1}}
def __getattribute__(self, name):
if name == "get_nodes":
return lambda: DryJenkins.get_nodes(self)
if name == "get_plugins":
return lambda depth: DryJenkins.get_plugins(self, depth)
return (lambda *args: "DRY_RUN")
def get_items_from_file(list_file):
jobs = set()
for line in open(list_file):
job_name = line.strip().replace("\n", "")
if job_name:
jobs.add(job_name)
return jobs
def url_to_host(url):
return url.replace("http://", "").replace("https://", "")
def connect(url, login, password):
if not login and not password:
host = url_to_host(url)
try:
secrets = netrc.netrc()
secret = secrets.authenticators(host)
except IOError:
print("Please, provide login and password as parameters "
"or put them to .netrc file as default values for the host:", host)
secret = None
if secret is None:
return None
login, _, password = secret
return jenkins_api.Jenkins(url, login, password)
def validate_params(params): # noqa: C901
if args.action == "job":
if args.dump_to_file and (not params.name or params.list_from_file):
print("`--dump-to-file` can be used only with `--name`")
return False
if args.create_from_file and (not params.name or params.list_from_file):
print("`--create-from-file` can be used only with `--name`")
return False
elif args.action == "node":
if params.get_nodes:
return True
elif args.action == "script":
if params.execute_from_file:
return True
else:
print("Please, specify `--execute-from-file` option")
return False
else:
return True
if not params.name and not params.list_from_file:
print("Please, provide job name ('--name') or file with job names ('--list-from-file')")
return False
if params.name and params.list_from_file:
print("Using '--name' and '--list-from-file' at the same time is not supported")
return False
if args.enable and args.disable:
print("--enable and --disable can not be used together")
return False
return True
def main(args):
if not args.dry_run:
jenkins = connect(args.jenkins, args.login, args.password)
else:
jenkins = DryJenkins()
print("Succesfully connected to %s." % args.jenkins, "Version is", jenkins.get_version())
do_action(args, jenkins)
def do_action(args, jenkins):
actions = {"job": lambda x, y: process_jobs(x, y),
"node": lambda x, y: process_nodes(x, y),
"plugin": lambda x, y: process_plugins(x, y),
"script": lambda x, y: process_script(x, y)}
action = actions[args.action]
action(jenkins, args)
def process_script(jenkins, args):
with open(args.execute_from_file) as f:
script = f.read()
res = jenkins.run_script(script)
if res:
print(res)
def process_plugins(jenkins, args):
for plugin, desc in sorted(jenkins.get_plugins(depth=1).items(),
key=lambda x: x[0][1]):
print("{0}: {1}".format(plugin[1], desc["version"]))
def process_nodes(jenkins, args):
what_to_do = get_what_to_do(args, "node")
if what_to_do:
generic_action(jenkins, args, what_to_do)
elif args.get_nodes:
get_all_nodes(jenkins, args)
elif args.replace:
replace(jenkins, args, "node")
return
def get_all_nodes(jenkins, args):
show_all = args.get_nodes == "all"
for node in jenkins.get_nodes():
if show_all or node['offline'] == (args.get_nodes == "offline"):
print(node['name'])
def get_config(jenkins, name, key):
try:
if key == "job":
return jenkins.get_job_config(name)
if key == "node":
return jenkins.get_node_config(name)
print("Invalid key for get_config:", key)
except jenkins_api.NotFoundException:
print("Can't find the {0}: {1}".format(key, name))
return None
def get_items(args):
items = None
if args.name:
items = [args.name]
if args.list_from_file:
items = get_items_from_file(args.list_from_file)
return items
def update_config(jenkins, item, new_config, key):
if key == "job":
jenkins.reconfig_job(item, new_config)
if key == "node":
jenkins.reconfig_node(item, new_config)
return
def replace(jenkins, args, key):
splitter = args.replace[0]
statement = args.replace[1:]
if statement.count(splitter) != 1:
print()
print("You selected bad splitter '{0}', "
"because it occurs in your replacement as well. "
"Please, choose another one.".format(splitter))
return
items = get_items(args)
for item in items:
original_config = get_config(jenkins, item, key)
if not original_config:
continue
orig, target = args.replace[1:].split(splitter)
new_config = original_config.replace(orig, target)
if original_config == new_config:
print("Config was not changed for the {0}: {1}".format(key, item))
else:
update_config(jenkins, item, new_config, key)
print("Config was updated for the {0}: {1}".format(key, item))
return
def start_jobs(jenkins, args):
items = get_items(args)
for item in items:
print("Run:", item)
jenkins.build_job(item, {"fake_parameter": "x"})
return
def create_from_file(jenkins, args):
with open(args.create_from_file) as f:
jenkins.create_job(args.name, f.read())
print("Job `%s` was created from the file: %s" % (args.name, args.create_from_file))
def dump_to_file(jenkins, args, key):
config = get_config(jenkins, args.name, key)
if not config:
return
if not args.dry_run:
with open(args.dump_to_file, "w") as f:
f.write(config)
print("Configuration for the {0} `{1}` was dumped to the file: {2}".
format(key, args.name, args.dump_to_file))
return
def generic_action(jenkins, args, key):
actions = {"Delete job": lambda x: jenkins.delete_job(x),
"Delete node": lambda x: jenkins.delete_node(x),
"Disable node": lambda x: jenkins.disable_node(x),
"Disable job": lambda x: jenkins.disable_job(x),
"Enable job": lambda x: jenkins.enable_job(x),
"Enable node": lambda x: jenkins.enable_node(x),
"Dump job": lambda _: dump_to_file(jenkins, args, "job"),
"Dump node": lambda _: dump_to_file(jenkins, args, "node")}
for item in get_items(args):
action = actions[key]
action(item)
print("{0}: {1}".format(key, item))
def process_jobs(jenkins, args):
what_to_do = get_what_to_do(args, "job")
if what_to_do:
generic_action(jenkins, args, what_to_do)
elif args.replace:
replace(jenkins, args, "job")
elif args.create_from_file:
create_from_file(jenkins, args)
elif args.start:
start_jobs(jenkins, args)
return
def get_what_to_do(args, key):
if args.enable:
return "Enable " + key
elif args.disable:
return "Disable " + key
elif args.delete:
return "Delete " + key
elif args.dump_to_file:
return "Dump " + key
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Jenkinsator helps to orchestrate Jenkins master')
parser.add_argument(dest="jenkins", action="store", help="Jenkins master [full url]")
parser.add_argument('--login', help="login to access Jenkins [INSECURE - use .netrc]")
parser.add_argument('--password', help="password to access Jenkins [INSECURE - use .netrc]")
parser.add_argument('--dry-run', action="store_true", help="do not perform any action")
subparsers = parser.add_subparsers(title='Actions', dest="action",
help="Choose action type you want to perform")
job_parser = subparsers.add_parser("job")
job_parser.add_argument('--create-from-file', help="create the job from configuration file")
job_parser.add_argument('--start', action="store_true", help="starts the job")
node_parser = subparsers.add_parser("node")
for sub_parser, key in [(job_parser, "job"), (node_parser, "node")]:
sub_parser.add_argument('--list-from-file',
help="file to retrive the list of " +
key + "s to be processed [one per line]")
sub_parser.add_argument('--name', help=key + " to be processed [full name]")
        sub_parser.add_argument('--dump-to-file', help="dump " + key + " configuration to the file")
sub_parser.add_argument('--enable', action="store_true", help="enable the " + key)
sub_parser.add_argument('--disable', action="store_true", help="disable the " + key)
sub_parser.add_argument('--delete', action="store_true", help="delete the " + key)
sub_parser.add_argument('--replace',
help="Replace some pattern in the configuration. "
"Use first symbol to configure the splitter "
"and the rest of parameter to define the "
"original value and desired one, i.e."
"`?aaa?bbb` specifies `?` as a splitter and "
"replaces all occurances of `aaa` by `bbb`")
node_parser.add_argument('--get-nodes', choices=["offline", "online", "all"],
help="dump list of all connected nodes")
plugin_parser = subparsers.add_parser("plugin")
plugin_parser.add_argument("--list-all", action="store_true",
help="list all available plugins")
script_parser = subparsers.add_parser("script")
script_parser.add_argument("--execute-from-file", action="store",
help="execute custom Groovy scipt from specified file")
args = parser.parse_args()
if not validate_params(args):
sys.exit(1)
sys.exit(main(args))
``` |
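The `connect` helper above falls back to `~/.netrc` when no credentials are passed, so the CLI can run without `--login`/`--password`. A sketch of that path; the host name is hypothetical, and importing the module is safe because the CLI lives under the `__main__` guard.

```python
from jenkinsator import connect  # module name taken from the file name above

# ~/.netrc entry for the master (hypothetical host):
#   machine jenkins.example.com
#   login my-user
#   password my-api-token
server = connect("https://jenkins.example.com", login=None, password=None)
if server:
    print("Connected, Jenkins version:", server.get_version())
```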
{
"source": "jimimased/HyperGAN",
"score": 2
} |
#### File: HyperGAN/examples/2d-measure-accuracy.py
```python
import argparse
import os
import uuid
import tensorflow as tf
import hypergan as hg
import hyperchamber as hc
import matplotlib.pyplot as plt
from hypergan.loaders import *
from hypergan.util.hc_tf import *
from hypergan.generators import *
import math
def parse_args():
parser = argparse.ArgumentParser(description='Train a 2d test!', add_help=True)
parser.add_argument('--batch_size', '-b', type=int, default=32, help='Examples to include in each batch. If using batch norm, this needs to be preserved when in server mode')
parser.add_argument('--device', '-d', type=str, default='/gpu:0', help='In the form "/gpu:0", "/cpu:0", etc. Always use a GPU (or TPU) to train')
parser.add_argument('--format', '-f', type=str, default='png', help='jpg or png')
parser.add_argument('--config', '-c', type=str, default='2d-test', help='config name')
parser.add_argument('--distribution', '-t', type=str, default='circle', help='what distribution to test, options are circle, modes')
return parser.parse_args()
def no_regularizer(amt):
return None
def custom_discriminator_config():
return {
'create': custom_discriminator
}
def custom_generator_config():
return {
'create': custom_generator
}
def custom_discriminator(gan, config, x, g, xs, gs, prefix='d_'):
net = tf.concat(axis=0, values=[x,g])
net = linear(net, 128, scope=prefix+'lin1')
net = tf.nn.relu(net)
net = linear(net, 128, scope=prefix+'lin2')
return net
def custom_generator(config, gan, net):
net = linear(net, 128, scope="g_lin_proj")
net = batch_norm_1(gan.config.batch_size, name='g_bn_1')(net)
net = tf.nn.relu(net)
net = linear(net, 2, scope="g_lin_proj3")
net = tf.tanh(net)
return [net]
def d_pyramid_search_config():
return hg.discriminators.pyramid_discriminator.config(
activation=[tf.nn.relu, lrelu, tf.nn.relu6, tf.nn.elu],
depth_increase=[1.5,1.7,2,2.1],
final_activation=[tf.nn.relu, tf.tanh, None],
layer_regularizer=[batch_norm_1, layer_norm_1, None],
layers=[2,1],
fc_layer_size=[32,16,8,4,2],
fc_layers=[0,1,2],
first_conv_size=[4,8,2,1],
noise=[False, 1e-2],
progressive_enhancement=[False],
strided=[True, False],
create=d_pyramid_create
)
def g_resize_conv_search_config():
return resize_conv_generator.config(
z_projection_depth=[8,16,32],
activation=[tf.nn.relu,tf.tanh,lrelu,resize_conv_generator.generator_prelu],
final_activation=[None,tf.nn.tanh,resize_conv_generator.minmax],
depth_reduction=[2,1.5,2.1],
layer_filter=None,
layer_regularizer=[layer_norm_1,batch_norm_1],
block=[resize_conv_generator.standard_block, resize_conv_generator.inception_block, resize_conv_generator.dense_block],
resize_image_type=[1],
create_method=g_resize_conv_create
)
def g_resize_conv_create(config, gan, net):
gan.config.x_dims = [8,8]
gan.config.channels = 1
gs = resize_conv_generator.create(config,gan,net)
filter = [1,4,8,1]
stride = [1,4,8,1]
gs[0] = tf.nn.avg_pool(gs[0], ksize=filter, strides=stride, padding='SAME')
#gs[0] = linear(tf.reshape(gs[0], [gan.config.batch_size, -1]), 2, scope="g_2d_lin")
gs[0] = tf.reshape(gs[0], [gan.config.batch_size, 2])
return gs
def d_pyramid_create(gan, config, x, g, xs, gs, prefix='d_'):
with tf.variable_scope("d_input_projection", reuse=False):
x = linear(x, 8*8, scope=prefix+'input_projection')
x = tf.reshape(x, [gan.config.batch_size, 8, 8, 1])
with tf.variable_scope("d_input_projection", reuse=True):
g = linear(g, 8*8, scope=prefix+'input_projection')
g = tf.reshape(g, [gan.config.batch_size, 8, 8, 1])
return hg.discriminators.pyramid_discriminator.discriminator(gan, config, x, g, xs, gs, prefix)
def batch_accuracy(a, b):
"Each point of a is measured against the closest point on b. Distance differences are added together."
tiled_a = a
tiled_a = tf.reshape(tiled_a, [int(tiled_a.get_shape()[0]), 1, int(tiled_a.get_shape()[1])])
tiled_a = tf.tile(tiled_a, [1, int(tiled_a.get_shape()[0]), 1])
tiled_b = b
tiled_b = tf.reshape(tiled_b, [1, int(tiled_b.get_shape()[0]), int(tiled_b.get_shape()[1])])
tiled_b = tf.tile(tiled_b, [int(tiled_b.get_shape()[0]), 1, 1])
difference = tf.abs(tiled_a-tiled_b)
difference = tf.reduce_min(difference, axis=1)
difference = tf.reduce_sum(difference, axis=1)
return tf.reduce_sum(difference, axis=0)
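# Note: the minimum is taken per coordinate (reduce_min runs over the b-axis for each
# dimension separately), so identical point sets score 0 and, for a single pair of points
# a = [[0., 0.]] and b = [[3., 4.]], the result is 7.0 (|0-3| + |0-4|). Below, this is used
# to compare real samples x against generated samples g in both directions, and g against
# itself as a rough diversity measure.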
args = parse_args()
def train():
selector = hg.config.selector(args)
config_name="2d-measure-accuracy-"+str(uuid.uuid4())
config = selector.random_config()
config_filename = os.path.expanduser('~/.hypergan/configs/'+config_name+'.json')
trainers = []
rms_opts = {
'g_momentum': [0,0.1,0.01,1e-6,1e-5,1e-1,0.9,0.999, 0.5],
'd_momentum': [0,0.1,0.01,1e-6,1e-5,1e-1,0.9,0.999, 0.5],
'd_decay': [0.8, 0.9, 0.99,0.999,0.995,0.9999,1],
'g_decay': [0.8, 0.9, 0.99,0.999,0.995,0.9999,1],
'clipped_gradients': [False, 1e-2],
'clipped_d_weights': [False, 1e-2],
'd_learn_rate': [1e-3,1e-4,5e-4,1e-6,4e-4, 5e-5],
'g_learn_rate': [1e-3,1e-4,5e-4,1e-6,4e-4, 5e-5]
}
stable_rms_opts = {
"clipped_d_weights": 0.01,
"clipped_gradients": False,
"d_decay": 0.995, "d_momentum": 1e-05,
"d_learn_rate": 0.001,
"g_decay": 0.995,
"g_momentum": 1e-06,
"g_learn_rate": 0.0005,
}
trainers.append(hg.trainers.rmsprop_trainer.config(**rms_opts))
adam_opts = {}
adam_opts = {
'd_learn_rate': [1e-3,1e-4,5e-4,1e-2,1e-6],
'g_learn_rate': [1e-3,1e-4,5e-4,1e-2,1e-6],
'd_beta1': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
'd_beta2': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
'g_beta1': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
'g_beta2': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
'd_epsilon': [1e-8, 1, 0.1, 0.5],
'g_epsilon': [1e-8, 1, 0.1, 0.5],
'd_clipped_weights': [False, 0.01],
'clipped_gradients': [False, 0.01]
}
trainers.append(hg.trainers.adam_trainer.config(**adam_opts))
sgd_opts = {
'd_learn_rate': [1e-3,1e-4,5e-4,1e-2,1e-6],
'g_learn_rate': [1e-3,1e-4,5e-4,1e-2,1e-6],
'd_clipped_weights': [False, 0.01],
'clipped_gradients': [False, 0.01]
}
trainers.append(hg.trainers.sgd_trainer.config(**sgd_opts))
encoders = []
projections = []
projections.append([hg.encoders.uniform_encoder.modal, hg.encoders.uniform_encoder.identity])
projections.append([hg.encoders.uniform_encoder.modal, hg.encoders.uniform_encoder.sphere, hg.encoders.uniform_encoder.identity])
projections.append([hg.encoders.uniform_encoder.binary, hg.encoders.uniform_encoder.sphere])
projections.append([hg.encoders.uniform_encoder.sphere, hg.encoders.uniform_encoder.identity])
projections.append([hg.encoders.uniform_encoder.modal, hg.encoders.uniform_encoder.sphere])
projections.append([hg.encoders.uniform_encoder.sphere, hg.encoders.uniform_encoder.identity, hg.encoders.uniform_encoder.gaussian])
encoder_opts = {
'z': [16],
'modes': [2,4,8,16],
'projections': projections
}
stable_encoder_opts = {
"max": 1,
"min": -1,
"modes": 8,
"projections": [[
"function:hypergan.encoders.uniform_encoder.modal",
"function:hypergan.encoders.uniform_encoder.sphere",
"function:hypergan.encoders.uniform_encoder.identity"
]],
"z": 16
}
losses = []
lamb_loss_opts = {
'reverse':[True, False],
'reduce': [tf.reduce_mean,hg.losses.wgan_loss.linear_projection,tf.reduce_sum,tf.reduce_logsumexp],
'labels': [
[-1, 1, 0],
[0, 1, 1],
[0, -1, -1],
[1, -1, 0],
[0, -1, 1],
[0, 1, -1],
[0, 0.5, -0.5],
[0.5, -0.5, 0],
[0.5, 0, -0.5]
],
'alpha':[0,1e-3,1e-2,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99,0.999],
'beta':[0,1e-3,1e-2,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99,0.999]
}
lsgan_loss_opts = {
'reduce': [tf.reduce_mean,hg.losses.wgan_loss.linear_projection,tf.reduce_sum,tf.reduce_logsumexp],
'labels': [
[-1, 1, 0],
[0, 1, 1],
[0, -1, -1],
[1, -1, 0],
[0, -1, 1],
[0, 1, -1],
[0, 0.5, -0.5],
[0.5, -0.5, 0],
[0.5, 0, -0.5]
]
}
stable_loss_opts = {
"alpha": 0.5,
"beta": [0.5, 0.8],
"discriminator": None,
"label_smooth": 0.26111111111111107,
"labels": [[
0,
-1,
-1
]],
"reduce": "function:tensorflow.python.ops.math_ops.reduce_mean",
"reverse": True
}
#losses.append([hg.losses.wgan_loss.config(**loss_opts)])
losses.append([hg.losses.lamb_gan_loss.config(**lamb_loss_opts)])
#losses.append([hg.losses.lamb_gan_loss.config(**stable_loss_opts)])
#losses.append([hg.losses.lamb_gan_loss.config(**stable_loss_opts)])
losses.append([hg.losses.lsgan_loss.config(**lsgan_loss_opts)])
#encoders.append([hg.encoders.uniform_encoder.config(**encoder_opts)])
encoders.append([hg.encoders.uniform_encoder.config(**stable_encoder_opts)])
custom_config = {
'model': args.config,
'batch_size': args.batch_size,
'trainer': trainers,
'generator': custom_generator_config(),
'discriminators': [[custom_discriminator_config()]],
'losses': losses,
'encoders': encoders
}
custom_config_selector = hc.Selector()
for key,value in custom_config.items():
custom_config_selector.set(key, value)
print("Set ", key, value)
custom_config_selection = custom_config_selector.random_config()
for key,value in custom_config_selection.items():
config[key]=value
config['dtype']=tf.float32
config = hg.config.lookup_functions(config)
def circle(x):
spherenet = tf.square(x)
spherenet = tf.reduce_sum(spherenet, 1)
lam = tf.sqrt(spherenet)
return x/tf.reshape(lam,[int(lam.get_shape()[0]), 1])
def modes(x):
return tf.round(x*2)/2.0
if args.distribution == 'circle':
x = tf.random_normal([args.batch_size, 2])
x = circle(x)
elif args.distribution == 'modes':
x = tf.random_uniform([args.batch_size, 2], -1, 1)
x = modes(x)
elif args.distribution == 'sin':
x = tf.random_uniform((1, args.batch_size), -10.5, 10.5 )
x = tf.transpose(x)
r_data = tf.random_normal((args.batch_size,1), mean=0, stddev=0.1)
xy = tf.sin(0.75*x)*7.0+x*0.5+r_data*1.0
x = tf.concat([xy,x], 1)/16.0
elif args.distribution == 'arch':
offset1 = tf.random_uniform((1, args.batch_size), -10, 10 )
xa = tf.random_uniform((1, 1), 1, 4 )
xb = tf.random_uniform((1, 1), 1, 4 )
x1 = tf.random_uniform((1, args.batch_size), -1, 1 )
xcos = tf.cos(x1*np.pi + offset1)*xa
xsin = tf.sin(x1*np.pi + offset1)*xb
x = tf.transpose(tf.concat([xcos,xsin], 0))/16.0
initial_graph = {
'x':x,
'num_labels':1,
}
print("Starting training for: "+config_filename)
selector.save(config_filename, config)
with tf.device(args.device):
gan = hg.GAN(config, initial_graph)
accuracy_x_to_g=batch_accuracy(gan.graph.x, gan.graph.g[0])
accuracy_g_to_x=batch_accuracy(gan.graph.g[0], gan.graph.x)
s = [int(g) for g in gan.graph.g[0].get_shape()]
slice1 = tf.slice(gan.graph.g[0], [0,0], [s[0]//2, -1])
slice2 = tf.slice(gan.graph.g[0], [s[0]//2,0], [s[0]//2, -1])
accuracy_g_to_g=batch_accuracy(slice1, slice2)
x_0 = gan.sess.run(gan.graph.x)
z_0 = gan.sess.run(gan.graph.z[0])
gan.initialize_graph()
ax_sum = 0
ag_sum = 0
diversity = 0.00001
dlog = 0
last_i = 0
tf.train.start_queue_runners(sess=gan.sess)
for i in range(500000):
d_loss, g_loss = gan.train()
if(np.abs(d_loss) > 100 or np.abs(g_loss) > 100):
ax_sum = ag_sum = 100000.00
break
if i % 1000 == 0 and i != 0:
ax, ag, agg, dl = gan.sess.run([accuracy_x_to_g, accuracy_g_to_x, accuracy_g_to_g, gan.graph.d_log], {gan.graph.x: x_0, gan.graph.z[0]: z_0})
print("ERROR", ax, ag)
if np.abs(ax) > 50.0 or np.abs(ag) > 50.0:
ax_sum = ag_sum = 100000.00
break
#if(i % 10000 == 0 and i != 0):
# g_vars = [var for var in tf.trainable_variables() if 'g_' in var.name]
# init = tf.initialize_variables(g_vars)
# gan.sess.run(init)
if(i > 490000):
ax, ag, agg, dl = gan.sess.run([accuracy_x_to_g, accuracy_g_to_x, accuracy_g_to_g, gan.graph.d_log], {gan.graph.x: x_0, gan.graph.z[0]: z_0})
diversity += agg
ax_sum += ax
ag_sum += ag
dlog = dl
with open("results.csv", "a") as myfile:
myfile.write(config_name+","+str(ax_sum)+","+str(ag_sum)+","+ str(ax_sum+ag_sum)+","+str(ax_sum*ag_sum)+","+str(dlog)+","+str(diversity)+","+str(ax_sum*ag_sum*(1/diversity))+","+str(last_i)+"\n")
tf.reset_default_graph()
gan.sess.close()
while(True):
train()
```
#### File: hypergan/samplers/audio_sampler.py
```python
def sample():
g = sess.run(generator)
#TODO: Refactor
x_one = tf.slice(generator,[0,0,0],[1,config['mp3_size'], config['channels']])
x_one = tf.reshape(x_one, [config['mp3_size'],config['channels']])
audio = sess.run(ffmpeg.encode_audio(x_one, 'wav', config['mp3_bitrate']))
print("SAVING WITH BITRATE", config['mp3_bitrate'], config['mp3_size'])
fobj = open("samples/g.wav", mode='wb')
fobj.write(audio)
fobj.close()
plt.clf()
plt.figure(figsize=(2,2))
plt.plot(g[0])
plt.xlim([0, config['mp3_size']])
plt.ylim([-2, 2.])
plt.ylabel("Amplitude")
plt.xlabel("Time")
plt.savefig('visualize/g.png')
x_one = tf.slice(generator,[1,0,0],[1,config['mp3_size'], config['channels']])
x_one = tf.reshape(x_one, [config['mp3_size'],config['channels']])
audio = sess.run(ffmpeg.encode_audio(x_one, 'wav', config['mp3_bitrate']))
fobj = open("samples/g2.wav", mode='wb')
fobj.write(audio)
fobj.close()
plt.clf()
plt.figure(figsize=(2,2))
plt.plot(g[1])
plt.xlim([0, config['mp3_size']])
plt.ylim([-2, 2.])
plt.ylabel("Amplitude")
plt.xlabel("Time")
plt.savefig('visualize/g2.png')
return []
def sample():
return [{'image':'visualize/input.png','label':'input'},{'image':'visualize/g.png','label':'g'}, {'image':'visualize/g2.png','label':'g2'}]
```
#### File: hypergan/samplers/batch_sampler.py
```python
from hypergan.util.ops import *
from hypergan.samplers.common import *
z = None
def sample(gan, sample_file):
sess = gan.sess
config = gan.config
global z
generator = gan.graph.g[0]
y_t = gan.graph.y
z_t = gan.graph.z
x = np.linspace(0,1, 4)
y = np.linspace(0,1, 6)
z = np.random.uniform(-1, 1, [config['batch_size'], int(z_t[0].get_shape()[1])])
g=tf.get_default_graph()
with g.as_default():
tf.set_random_seed(1)
sample = sess.run(generator, feed_dict={z_t[0]: z})
stacks = [np.hstack(sample[x*8:x*8+8]) for x in range(4)]
plot(config, np.vstack(stacks), sample_file)
return [{'image':sample_file, 'label':'grid'}]
```
#### File: hypergan/samplers/common.py
```python
import numpy as np
from scipy.misc import imsave
def plot(config, image, file):
""" Plot an image."""
image = np.squeeze(image)
imsave(file, image)
```
#### File: hypergan/samplers/static_batch_sampler.py
```python
from hypergan.util.ops import *
from hypergan.samplers.common import *
#mask_noise = None
z = None
y = None
def sample(gan, sample_file):
sess = gan.sess
config = gan.config
global z, y
generator = gan.graph.g[0]
y_t = gan.graph.y
z_t = gan.graph.z[0] # TODO support multiple z
x = np.linspace(0,1, 4)
if z is None:
z = sess.run(z_t)
y = sess.run(y_t)
g=tf.get_default_graph()
with g.as_default():
tf.set_random_seed(1)
sample = sess.run(generator, feed_dict={z_t: z, y_t: y})
#plot(self.config, sample, sample_file)
stacks = [np.hstack(sample[x*8:x*8+8]) for x in range(4)]
plot(config, np.vstack(stacks), sample_file)
return [{'image':sample_file, 'label':'grid'}]
```
#### File: hypergan/trainers/common.py
```python
import tensorflow as tf
def capped_optimizer(optimizer, cap, loss, vars):
gvs = optimizer.compute_gradients(loss, var_list=vars)
def create_cap(grad,var):
if(grad == None) :
print("Warning: No gradient for variable ",var.name)
return None
return (tf.clip_by_value(grad, -cap, cap), var)
capped_gvs = [create_cap(grad,var) for grad, var in gvs]
capped_gvs = [x for x in capped_gvs if x != None]
return optimizer.apply_gradients(capped_gvs)
```
#### File: hypergan/trainers/momentum_trainer.py
```python
import tensorflow as tf
import numpy as np
import hyperchamber as hc
from .common import *
def config():
selector = hc.Selector()
selector.set('create', create)
selector.set('run', run)
selector.set('d_learn_rate', 1e-3)
selector.set('discriminator_epsilon', 1e-8)
selector.set('discriminator_beta1', 0.9)
selector.set('discriminator_beta2', 0.999)
selector.set('g_learn_rate', 1e-3)
selector.set('generator_epsilon', 1e-8)
selector.set('generator_beta1', 0.9)
selector.set('generator_beta2', 0.999)
selector.set('capped', False)
selector.set('clipped_discriminator', False)
return selector.random_config()
def create(config, gan, d_vars, g_vars):
d_loss = gan.graph.d_loss
g_loss = gan.graph.g_loss
g_lr = np.float32(config.g_learn_rate)
d_lr = np.float32(config.d_learn_rate)
gan.graph.d_vars = d_vars
if(config.capped):
g_optimizer = capped_optimizer(tf.train.MomentumOptimizer, g_lr, g_loss, g_vars)
d_optimizer = capped_optimizer(tf.train.MomentumOptimizer, d_lr, d_loss, d_vars)
else:
g_optimizer = tf.train.MomentumOptimizer(g_lr, 0.975).minimize(g_loss, var_list=g_vars)
d_optimizer = tf.train.MomentumOptimizer(d_lr, 0.975).minimize(d_loss, var_list=d_vars)
return g_optimizer, d_optimizer
iteration = 0
def run(gan):
sess = gan.sess
config = gan.config
x_t = gan.graph.x
g_t = gan.graph.g
d_log_t = gan.graph.d_log
g_loss = gan.graph.g_loss
d_loss = gan.graph.d_loss
d_fake_loss = gan.graph.d_fake_loss
d_real_loss = gan.graph.d_real_loss
g_optimizer = gan.graph.g_optimizer
d_optimizer = gan.graph.d_optimizer
d_class_loss = gan.graph.d_class_loss
d_vars = gan.graph.d_vars
_, d_cost, d_log = sess.run([d_optimizer, d_loss, d_log_t])
# in WGAN paper, values are clipped. This might not work, and is slow.
if(config.clipped_discriminator):
clip = [tf.assign(d,tf.clip_by_value(d, -config.clip_value, config.clip_value)) for d in d_vars]
sess.run(clip)
if(d_class_loss is not None):
_, g_cost,d_fake,d_real,d_class = sess.run([g_optimizer, g_loss, d_fake_loss, d_real_loss, d_class_loss])
#print("%2d: g cost %.2f d_loss %.2f d_real %.2f d_class %.2f d_log %.2f" % (iteration, g_cost,d_cost, d_real, d_class, d_log ))
else:
_, g_cost,d_fake,d_real = sess.run([g_optimizer, g_loss, d_fake_loss, d_real_loss])
#print("%2d: g cost %.2f d_loss %.2f d_real %.2f d_log %.2f" % (iteration, g_cost,d_cost, d_real, d_log ))
global iteration
iteration+=1
return d_cost, g_cost
```
#### File: hypergan/vendor/vggnet_loader.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import op_def_registry
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import hypergan.loaders.resize_image_patch
MODEL_DIR='/tmp/imagenet'
# pylint: disable=line-too-long
DATA_URL = 'https://github.com/pavelgonchar/colornet/blob/master/vgg/tensorflow-vgg16/vgg16-20160129.tfmodel?raw=true'
# pylint: enable=line-too-long
def create_graph(image, output_layer):
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'vgg16-20160129.tfmodel?raw=true'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
for node in graph_def.node:
print(node.name)
#if(node.name != "DecodeJpeg" and node.name != "ResizeBilinear" and node.name != "DecodeJpeg/contents"):
node.device = "/cpu:0"
result= tf.import_graph_def(graph_def, name='vggnet', input_map={"images":reshape_input(image)}, return_elements=[output_layer])
result = result[0]
return result
def get_features(image):
graph = create_graph(image, 'Relu_1:0')
return tf.squeeze(graph[0])
def reshape_input(img):
reshaped_image = tf.identity(tf.squeeze(img))
tf.Tensor.set_shape(reshaped_image, [None, None, None])
    reshaped_image = hypergan.loaders.resize_image_patch.resize_image_with_crop_or_pad(reshaped_image,
224, 224, dynamic_shape=True)
r = tf.fill([224, 224], 103.939)
g = tf.fill([224, 224], 116.779)
b = tf.fill([224, 224], 123.68)
offset = tf.transpose(tf.stack([r,g,b]), [2, 1, 0])
reshaped_image -= offset
#reshaped_image = tf.transpose(reshaped_image, [0, 2, 1])
reshaped_image = tf.expand_dims(reshaped_image, 0)
print("RESHAPED", reshaped_image)
return reshaped_image
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = MODEL_DIR
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
``` |
{
"source": "jimimvp/CausalProb",
"score": 2
} |
#### File: CausalProb/models/nf_confounder_model2.py
```python
from models.normalizing_flow.architectures import RealNVP
import jax.numpy as jnp
from jax.config import config
from jax.experimental import stax # neural network library
from jax.experimental.stax import Dense, Relu, normal # neural network layers
from jax import random
config.update("jax_enable_x64", True)
class NeuralNet:
def __init__(self, dim: int, seed: int = 0):
self.dim = dim
self.net_init, self.net_apply = stax.serial(Dense(8, W_init=normal()), Relu, Dense(8, W_init=normal()), Relu, Dense(self.dim, W_init=normal()))
self.seed = seed
def shift_and_log_scale_fn(self, u: jnp.array, params: jnp.array) -> list:
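        # The MLP output is split in two along the last axis, giving the shift
        # and log-scale terms used by the RealNVP-style coupling transform.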
s = self.net_apply(params, u)
return jnp.split(s, 2, axis=-1)
def init_params(self, seed: int = 0) -> tuple:
in_shape = (-1, self.dim)
out_shape, layer_params = self.net_init(random.PRNGKey(self.seed + seed), in_shape)
return out_shape, layer_params
def define_model(dim=2):
f, finv, lpu, draw_u, init_params, ldij = dict(), dict(), dict(), dict(), dict(), dict()
nf = RealNVP(dim=dim, seed=42)
nn2 = NeuralNet(dim=2, seed=43)
nn4 = NeuralNet(dim=4, seed=44)
# V
def _f_V1(u: jnp.array, theta: dict, parents: dict):
return nf.forward(u, theta['V1'])
f['V1'] = _f_V1
def _finv_V1(v: jnp.array, theta: dict, parents: dict):
return nf.backward(v, theta['V1'])
finv['V1'] = lambda v, theta, parents: _finv_V1(v, theta, parents)[0]
ldij['V1'] = lambda v, theta, parents: jnp.sum(_finv_V1(v, theta, parents)[1], -1)
lpu['V1'] = lambda u, theta: nf.evaluate_base_logpdf(u)
draw_u['V1'] = lambda size, theta: nf.sample_base(size)
init_params['V1'] = lambda seed: nf.init_all_params(seed)
# X
def _f_X(u: jnp.array, theta: dict, parents: dict):
v1 = parents['V1']
return nf.forward(v1, theta['V1->X']) + nf.forward(u, theta['U_X->X'])
f['X'] = _f_X
def _finv_X(v: jnp.array, theta: dict, parents: dict):
v1 = parents['V1']
return nf.backward(v - nf.forward(v1, theta['V1->X']), theta['U_X->X'])
finv['X'] = lambda v, theta, parents: _finv_X(v, theta, parents)[0]
ldij['X'] = lambda v, theta, parents: jnp.sum(_finv_X(v, theta, parents)[1], -1)
lpu['X'] = lambda u, theta: nf.evaluate_base_logpdf(u)
draw_u['X'] = lambda size, theta: nf.sample_base(size)
init_params['V1->X'] = lambda seed: nn2.init_params(seed)[1]
init_params['U_X->X'] = lambda seed: nf.init_all_params(seed)
# Y
def _f_Y(u: jnp.array, theta: dict, parents: dict):
v1, x = parents['V1'], parents['X']
return nf.forward(v1, theta['V1->Y']) + nf.forward(x, theta['X->Y']) + nf.forward(u, theta['U_Y->Y'])
f['Y'] = _f_Y
def _finv_Y(v: jnp.array, theta: dict, parents: dict):
v1, x = parents['V1'], parents['X']
return nf.backward(v - nf.forward(v1, theta['V1->Y']) - nf.forward(x, theta['X->Y']), theta['U_Y->Y'])
finv['Y'] = lambda v, theta, parents: _finv_Y(v, theta, parents)[0]
ldij['Y'] = lambda v, theta, parents: jnp.sum(_finv_Y(v, theta, parents)[1], -1)
lpu['Y'] = lambda u, theta: nf.evaluate_base_logpdf(u)
draw_u['Y'] = lambda size, theta: nf.sample_base(size)
init_params['V1--X->Y'] = lambda seed: nn4.init_params(seed)[1]
init_params['U_Y->Y'] = lambda seed: nf.init_all_params(seed)
return dict(f=f, finv=finv, lpu=lpu, draw_u=draw_u, init_params=init_params, ldij=ldij)
```
#### File: CausalProb/tests/test_models.py
```python
from causalprob import CausalProb
import unittest
import jax.numpy as jnp
import numpy as np
class TestNFConfounderModel(unittest.TestCase):
def test_is_inverse_function(self):
from models.nf_confounder_model import define_model
dim = 2
model = define_model(dim=dim)
cp = CausalProb(model=model)
theta = {k: cp.init_params[k](i) for i, k in enumerate(cp.init_params)}
u, v = cp.fill({k: cp.draw_u[k](1, theta, seed) for seed, k in enumerate(cp.draw_u)}, {}, theta, cp.draw_u.keys())
for rv in cp.f:
assert jnp.allclose(cp.finv[rv](cp.f[rv](u[rv], theta, v), theta, v), u[rv])
def test_determinant(self):
from models.nf_confounder_model import define_model
dim = 2
model = define_model(dim=dim)
cp = CausalProb(model=model)
theta = {k: cp.init_params[k](i) for i, k in enumerate(cp.init_params)}
u, v = cp.fill({k: cp.draw_u[k](1, theta, seed) for seed, k in enumerate(cp.draw_u)}, {}, theta, cp.draw_u.keys())
for rv in cp.ldij:
assert jnp.allclose(jnp.round(cp.ldij[rv](v[rv], theta, v).squeeze(), 4),
jnp.round(
jnp.log(
jnp.abs(
jnp.linalg.det(
cp.dfinvv_dv(rv, {k: _v.squeeze(0) for k, _v in v.items()}, theta)))), 4))
``` |
{
"source": "jiminald/energenie-webpower",
"score": 3
} |
#### File: jiminald/energenie-webpower/server.py
```python
import os
import json
import atexit
import bottle
from time import sleep
from bottle import route, request, response, template, static_file
from gpiozero import Energenie
# Set Document Root by using the current file directory
DOCUMENT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Load JSON config
with open(DOCUMENT_ROOT+'/config.json', 'r') as f:
CONFIG = json.load(f)
# print(json.dumps(CONFIG, indent=4, sort_keys=True))
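# The config is assumed to look roughly like the following (a sketch inferred
# from the keys read below; the real file may contain additional fields):
#   {"http_host": "0.0.0.0", "http_port": 8080,
#    "sockets": {"1": {"default_state": 0}, "2": {"default_state": 0}}}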
# Dynamically load the sockets we're controlling
sockets = [0]
for key, val in CONFIG['sockets'].items():
# Convert the key from a string to an integer
key = int(key)
# Create our connection to the socket and set the default state of the socket
sockets.insert(key, Energenie(key, bool(val['default_state'])))
# Function to control the socket
def energenie_socket_power(id, action):
# Open a connection to the socket
socket = sockets[id]
# Do action
if action == 'off':
socket.off()
elif action == 'on':
socket.on()
# Sleep, to ensure the command is sent
sleep(0.05)
# Function to read the socket state
def energenie_socket_state(id):
# Open a connection to the socket
socket = sockets[id]
# Find out what state the socket is in (On or Off)
val = socket.value
# Give this back to whomever asked for it
return val
# Close everything when we quit the script
def on_exit():
for key, val in CONFIG['sockets'].items():
key = int(key)
sockets[key].close()
# Register the shutdown function
atexit.register(on_exit)
# Create the bottle web server
app = bottle.Bottle()
# Public assets and resources
@app.route('/public/<filename:re:.+>')
def server_public(filename):
return static_file(filename, root=DOCUMENT_ROOT+"/public")
# Serve up config.json
@app.route('/config.json')
def server_config():
return static_file('config.json', root=DOCUMENT_ROOT)
# Serve up the state of the socket
@app.route('/state/<socket_id:int>')
def socket_state(socket_id):
return '{"state":"%s"}' % str(energenie_socket_state(socket_id)).lower()
# Change the Socket State
@app.route('/state/<socket_id:int>/<action>')
def socket_state_trigger(socket_id, action):
energenie_socket_power(socket_id, action)
return socket_state(socket_id)
# Serve up the default index.html page
@app.route('/')
def server_home():
return static_file('index.html', root=DOCUMENT_ROOT+"/public")
# Start web server
app.run(host=CONFIG['http_host'], port=CONFIG['http_port'])
``` |
{
"source": "jiminald/homeassistant-unraid",
"score": 2
} |
#### File: custom_components/unraid/__init__.py
```python
import logging
import os.path, time
# Home assistant
import voluptuous as vol
from homeassistant import config_entries
import homeassistant.helpers.config_validation as cv
# GraphQL
import requests
from requests.exceptions import Timeout
from requests.exceptions import ConnectionError
import json
# Constants
from .const import (
DOMAIN,
HOSTS,
CONF_HOST,
CONF_API_KEY,
GRAPHQL_ENDPOINTS,
SENSOR_LIST,
)
# Config schema
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_API_KEY): cv.string,
# vol.Optional(CONF_BINARY_SENSOR): vol.All(
# cv.ensure_list, [BINARY_SENSOR_SCHEMA]
# ),
# vol.Optional(CONF_SENSOR): vol.All(cv.ensure_list, [SENSOR_SCHEMA]),
# vol.Optional(CONF_SWITCH): vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
# Set logger name
_LOGGER = logging.getLogger(__name__)
# def setup(hass, config):
async def async_setup(hass, config):
"""Set up the Unraid component using YAML"""
# Check config is setup
if config.get(DOMAIN) is None:
# We get here if the integration is set up using config flow
return True
# Debug log we're starting
_LOGGER.debug("YAML Setup started")
# Setup data dict
hass.data[DOMAIN] = {}
# Get "global" configuration.
host = config[DOMAIN].get(CONF_HOST)
api_key = config[DOMAIN].get(CONF_API_KEY)
# Config the unRAID Client
try:
api = UnraidClient(
hass, host, api_key
)
# Prepare JSON objects
for sensor_name in SENSOR_LIST:
api._json_object[sensor_name] = {}
# Store data
hass.data[DOMAIN] = {"config": config[DOMAIN], "api": api}
# Load sensors
hass.helpers.discovery.load_platform('sensor', DOMAIN, {"host": host}, config)
# Add config Flow
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
)
)
except Exception:
_LOGGER.error("(YAML) unRAID Fatal Error: %s - Failed to connect to API", host)
return True
async def async_setup_entry(hass, config_entry):
"""Set up this integration using UI."""
conf = hass.data.get(DOMAIN)
if config_entry.source == config_entries.SOURCE_IMPORT:
if conf is None:
hass.async_create_task(
hass.config_entries.async_remove(config_entry.entry_id)
)
return False
# Debug log we're starting
_LOGGER.debug("UI Setup started")
# Create DATA dict
hass.data[DOMAIN] = {}
# Get "global" configuration.
host = config_entry.data.get(CONF_HOST)
api_key = config_entry.data.get(CONF_API_KEY)
# _LOGGER.debug("host: %s", host)
# _LOGGER.debug("api_key %s", api_key)
try:
_LOGGER.debug("(UI) Do API")
api = UnraidClient(
hass, host, api_key
)
_LOGGER.debug("(UI) Do sensors")
for sensor_name in SENSOR_LIST:
api._json_object[sensor_name] = {}
_LOGGER.debug("(UI) Do data")
hass.data[DOMAIN] = {"config": config_entry.data, "api": api}
# Load sensors
# _LOGGER.debug("(UI) Do load of sensors")
# hass.helpers.discovery.load_platform('sensor', DOMAIN, {"host": host}, config_entry.data)
except Exception:
_LOGGER.error("(UI) unRAID Fatal Error: %s - Failed to connect to API", host)
# Add binary_sensor
# hass.async_add_job(
# hass.config_entries.async_forward_entry_setup(config_entry, "binary_sensor")
# )
#
# Add sensor
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
#
# # Add switch
# hass.async_add_job(
# hass.config_entries.async_forward_entry_setup(config_entry, "switch")
# )
return True
class UnraidClient:
"""Handle GraphQL communications"""
def __init__(self, hass, host, api_key):
"""Initialize the Unraid GraphQL Client."""
self._host = host
self._api_key = api_key
self._hass = hass
self._json_object = {}
def poll_graphql(self, graphql='All'):
# Get all sensor data
graphql_query = ''
if not graphql == 'All':
graphql_query += GRAPHQL_ENDPOINTS[graphql]
else:
for sensor_name in SENSOR_LIST:
graphql_query += GRAPHQL_ENDPOINTS[sensor_name] + ','
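        # The fragments are joined into a single GraphQL document, so the request
        # body ends up as {"query": "{<fragment>, <fragment>, ...}"}, with the
        # fragments themselves defined in GRAPHQL_ENDPOINTS (const.py).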
# Make request
try:
_LOGGER.debug("Host = %s", self._host)
# Dump the request query
_LOGGER.debug('Request GraphQL = %s', graphql)
_LOGGER.debug('Request = {%s}', graphql_query)
result = requests.post(
self._host + '/graph',
headers = {
'x-api-key': self._api_key,
},
json = {
'query': '{'+ graphql_query +'}',
},
)
json_result = json.loads(result.content)
# Debug of JSON result
_LOGGER.debug("Result = %s", json_result)
# Process JSON
if not graphql == 'All':
self._json_object[graphql] = {
'json': json_result['data'][graphql],
'data': flatten_json(json_result['data'][graphql])
}
else:
for sensor_name in SENSOR_LIST:
self._json_object[sensor_name] = {
'json': json_result['data'][sensor_name],
'data': flatten_json(json_result['data'][sensor_name])
}
return self._json_object
except Timeout:
_LOGGER.debug('The request timed out')
except ConnectionError as ce:
_LOGGER.debug('Connection Error = %s', ce)
else:
_LOGGER.debug('The request did not time out')
return self._json_object
def flatten_json(y, prefix=""):
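    """Flatten a nested dict/list structure into a single-level dict.
    Nested keys are joined with underscores, e.g. {"a": {"b": [1, 2]}} becomes
    {"a_b_0": 1, "a_b_1": 2}; the optional prefix is prepended to every key.
    """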
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y, prefix)
return out
``` |
{
"source": "JimInCO/bishopric_tools",
"score": 2
} |
#### File: bishopric_tools/people/forms.py
```python
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
from people.models import Member
class MemberAddForm(forms.ModelForm):
class Meta:
model = Member
exclude = ["active"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit("submit", "Create Member"))
self.helper.form_id = "add-form"
```
#### File: bishopric_tools/people/lookups.py
```python
from ajax_select import register, LookupChannel
from django.db.models import Q
from .models import Member
@register("members")
class TagsLookup(LookupChannel):
model = Member
def get_query(self, q, request):
query = Q(first_name__icontains=q) | Q(last_name__icontains=q)
return self.model.objects.filter(query).order_by("last_name")
def format_item_display(self, item):
return u"<span class='tag'>{}</span>".format(item.full_name)
def get_result(self, obj):
""" result is the simple text that is the completion of what the person typed """
return obj.id
```
#### File: bishopric_tools/people/models.py
```python
from datetime import date
from django.db import models
from django.urls import reverse
from model_utils import Choices
from events.models import Talk
class Member(models.Model):
GENDER = Choices((0, "female", "female"), (1, "male", "male"))
lds_id = models.CharField(max_length=11, unique=True)
gender = models.IntegerField(choices=GENDER)
first_name = models.CharField(max_length=30, verbose_name="First name")
last_name = models.CharField(max_length=40, verbose_name="Last name")
birth_year = models.PositiveIntegerField(verbose_name="Birth Year")
# Contact information
email = models.EmailField(verbose_name="E-Mail", blank=True, null=True)
phone = models.CharField(max_length=20, verbose_name="Phone Number", blank=True, null=True)
street1 = models.CharField(max_length=50)
street2 = models.CharField(max_length=50, blank=True, null=True)
city = models.CharField(max_length=60)
state = models.CharField(max_length=2)
zip_code = models.CharField(max_length=10, blank=True, null=True)
# Still in the ward
active = models.BooleanField(default=True)
# Notes
notes = models.TextField(blank=True)
@property
def full_name(self):
"""Returns the person's full name"""
return "{} {}".format(self.first_name, self.last_name)
@property
def formal_name(self):
"""Returns the person's Title (Brother/Sister) and last name"""
if self.gender == 0:
return "Sister {}".format(self.last_name)
else:
return "Brother {}".format(self.last_name)
@property
def age(self):
"""Returns the person's age"""
return int((date.today().year - self.birth_year))
@property
def adult(self):
return self.age >= 19
@property
def last_talk_date(self):
talks = Talk.objects.filter(speaker=self).order_by("-date")
if len(talks) > 0:
return talks[0].date
else:
return None
def get_absolute_url(self):
return reverse("members:detail", args=[str(self.pk)])
def __str__(self):
return self.full_name
```
#### File: bishopric_tools/people/views.py
```python
from django.views.generic import ListView, CreateView, DetailView
from events.models import Talk
from . import forms
from . import models
class MemberDetail(DetailView):
model = models.Member
slug_field = "pk"
slug_url_kwarg = "pk"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["talks"] = Talk.objects.filter(speaker=self.object).order_by("-date")
return ctx
class MemberList(ListView):
model = models.Member
queryset = model.objects.filter(active=True)
class MemberAddView(CreateView):
model = models.Member
form_class = forms.MemberAddForm
``` |
{
"source": "Jiminger/CSMuseum",
"score": 3
} |
#### File: Jiminger/CSMuseum/app.py
```python
from flask import Flask
from flask import render_template
from markupsafe import Markup
import arduino_controller
import db_controller
app = Flask(__name__)
# Prototype using Case 3 Page as the Index page.
@app.route('/')
def index():
arduino_controller.light_specific_case(3)
return render_template("case_3.html")
@app.route('/item_<item_id>')
def item_page(item_id):
item_info = db_controller.get_item_information(3, item_id)
arduino_controller.light_specific_item(3, item_id)
return render_template("item.html", item_name=item_info[0][0], item_desc=Markup(item_info[0][1]),
img_path=item_info[0][2])
"""
This code can be used to later extend the program to light up all three museum cases.
# Index Page
@app.route('/')
def index():
arduino_controller.light_entire_museum()
return render_template("index.html")
# Case One Pages
@app.route('/case_1/')
def case_1():
# controller.turn_on_case(1)
return render_template("case_1.html")
# Case Two Pages
@app.route('/case_2/')
def case_2():
# controller.turn_on_case(2)
return render_template("case_2.html")
# Case Three Pages
@app.route('/case_3/')
def case_3():
# controller.turn_on_case(3)
return render_template("case_3.html")
@app.route('/case_<case_id>/item_<item_id>')
def item_page(case_id, item_id):
item_info = db_controller.get_item_information(case_id, item_id)
arduino_controller.light_specific_item(case_id, item_id)
return render_template("item.html", item_name=item_info[0][0], item_desc=Markup(item_info[0][1]), img_path=item_info[0][2])
"""
if __name__ == "__main__":
app.run()
```
#### File: Jiminger/CSMuseum/db_controller.py
```python
import mysql.connector
import info
def open_connection():
my_db = mysql.connector.connect(
host="localhost",
user=info.get_db_user(),
password=info.get_db_pass(),
database="CSMuseum"
)
my_cursor = my_db.cursor()
return my_db, my_cursor
def close_connection(connection):
for c in connection:
c.close()
def format_input(start_index, end_index):
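    # Builds a "<start,end>" token, e.g. format_input(3, 7) returns "<3,7>"
    # (presumably the framing that the Arduino controller expects).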
return '<' + str(start_index) + ',' + str(end_index) + '>'
def get_item_indexes(case_id, item_id):
connection = open_connection()
my_cursor = connection[1]
my_cursor.execute("SELECT start_index, end_index FROM Items WHERE case_id= " + str(case_id) +
" AND item_id =" + str(item_id) + ";")
my_result = my_cursor.fetchall()
close_connection(connection)
return format_input(str(my_result[0][0]), str(my_result[0][1]))
def get_item_information(case_id, item_id):
connection = open_connection()
my_cursor = connection[1]
my_cursor.execute("SELECT item_name, item_desc, img_path FROM Items WHERE case_id =" + str(case_id) +
" AND item_id =" + str(item_id) + ";")
my_result = my_cursor.fetchall()
close_connection(connection)
return my_result
``` |
{
"source": "jimingham/llvm-project",
"score": 3
} |
#### File: expression/calculator_mode/TestCalculatorMode.py
```python
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCalculatorMode(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test__calculator_mode(self):
"""Test calling expressions in the dummy target."""
self.expect("expression 11 + 22", "11 + 22 didn't get the expected result", substrs=["33"])
# Now try it with a specific language:
self.expect("expression -l c -- 11 + 22", "11 + 22 didn't get the expected result", substrs=["33"])
``` |
{
"source": "jiming-liu/overpick_Scrapy",
"score": 3
} |
#### File: Spider5i5j/spiders/city5i5j.py
```python
import scrapy
import demjson
from Spider5i5j.items import Spider5I5JItem
from Spider5i5j.spiders.startURL import startURL
class city5i5j(scrapy.Spider):
name = 'city5i5j'
allowed_domains = ['5i5j.com']
start_urls = ['http://cs.5i5j.com/exchange']
def parse(self, response):
city_page_query = '//body/nav/div/div/ul[@class="city-more-r"]/li/a'
for info in response.xpath(city_page_query):
item = Spider5I5JItem()
item['houseCity'] = info.xpath('text()').extract()[0]
item['houseCityURL'] = info.xpath('attribute::href').extract()[0]
yield item
```
#### File: Spider5i5j/spiders/ershoufang5i5j.py
```python
import scrapy
import demjson
from Spider5i5j.items import Spider5I5JItem
from Spider5i5j.spiders.startURL import startURL
class ershoufang5i5j(scrapy.Spider):
name = 'ershoufang5i5j'
allowed_domains = ['5i5j.com']
start_urls = startURL.ershoufangURL
def parse(self, response):
house_page_query = '//body/section/div/div/div/ul[@class="list-body"]/li'
house_page_root = response.request.url.split('/')[2]
for info in response.xpath(house_page_query):
house_page_href = info.xpath('a/attribute::href').extract()[0]
house_page_url = 'http://'+ house_page_root + house_page_href
yield scrapy.Request(house_page_url,callback=self.parse_house_page)
def parse_house_page(self,response):
item = Spider5I5JItem()
item['houseTitle'] = response.xpath('//html/head/title/text()').extract()[0].split('_')[0]
        # This XPath node exposes all of the basic information about the house
house_info_query = '//body/section/div/div/ul'
area_query = 'li/ul/li[3]/text()'
item['houseArea'] = response.xpath(house_info_query).xpath(area_query).extract()[0]
name_query = 'li[3]/text()'
item['houseName'] = response.xpath(house_info_query).xpath(name_query).extract()[0]
        # This XPath node exposes the historical price information and the earliest publication time
        # Initialise the house price as a dictionary
item['housePrice'] = {}
histroy_price_query = '//body/section/div/section/div/script/text()'
histroy_price_json = response.xpath(histroy_price_query).extract()[0].split(';')[1].split('=')[1]
histroy_price_dejson = demjson.decode(histroy_price_json)
histroy_price_data = histroy_price_dejson['xAxis'][0]['data']
histroy_time = len(histroy_price_data)
i = 0
while i < histroy_time :
histroy_price_guapai = histroy_price_dejson['series'][0]['data'][i]
histroy_price_chengjiao = histroy_price_dejson['series'][1]['data'][i]
item['housePrice'][histroy_price_data[i]] = {
'price_guapai' : histroy_price_guapai,
'price_chengjiao' : histroy_price_chengjiao
}
i += 1
        # The earliest entry in the price history is the publication time
item['housePublishedTime'] = histroy_price_data[0]
        # Fetch the address and city of the house
        item['houseAddress'] = response.xpath('//body/section/div/section/div[@class="xq-intro-info"]/ul/li[3]/text()').extract()[0]
        item['houseCity'] = response.xpath('//body').re(r'mapCityName.*;?')[0].split('\"')[-2]
item['houseBaiduLongitude'] = response.xpath('//body').re(r'mapY.*;?')[0].split('=')[-1].split(';')[0].replace('"','')
item['houseBaiduLatitude'] = response.xpath('//body').re(r'mapX.*;?')[0].split('=')[-1].split(';')[0].replace('"','')
yield item
```
#### File: SpiderGanji/SpiderGanji/pipelines.py
```python
import sys
import csv
import time
import string
class SpiderganjiPipeline(object):
def process_item(self, item, spider):
return item
class xinfangGanjiPipeline(object):
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
def process_item(self,item,spider):
if spider.name != 'xinfangGanji':
return item
        # Check whether historical price data exists.
        # The dict get() method has to be used for this check, otherwise an error is raised:
        # if the key is missing, indexing the dict directly fails, while get() simply returns None.
if not item.get('housePrice'):
return item
        # Open the output file and the CSV writer
self.file = open('xinfangGanji.csv','ab')
csvWriter = csv.writer(self.file)
        # Get the month of the publication time
time_tmp = string.atof(item['housePublishedTime'][0:10])
time_list = time.localtime(time_tmp)
if time_list[1] < 10:
times = '%d'%time_list[0]+'0'+'%d'%time_list[1]
else:
times = '%d'%time_list[0]+'%d'%time_list[1]
        # Format the item as a CSV row
house_area = item['houseArea']
price_chengjiao_tmp = item['housePrice']
price_guapai_tmp = item['housePrice']
price_chengjiao = string.atof(price_chengjiao_tmp) / string.atof(house_area) * 10000
price_guapai = string.atof(price_guapai_tmp) / string.atof(house_area) * 10000
house_name = item['houseName'].strip()
line = (times,house_name,item['houseCity'],price_chengjiao,price_guapai,item['houseArea'],item['houseAddress'],item['houseBaiduLatitude'],item['houseBaiduLongitude'],item['houseTitle'])
csvWriter.writerow(line)
return item
class ershoufangGanjiPipeline(object):
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
def process_item(self,item,spider):
if spider.name != 'ershoufangGanji':
return item
        # Check whether historical price data exists.
        # The dict get() method has to be used for this check, otherwise an error is raised:
        # if the key is missing, indexing the dict directly fails, while get() simply returns None.
if not item.get('housePrice'):
return item
        # Open the output file and the CSV writer
self.file = open('ershoufangfangGanji.csv','ab')
csvWriter = csv.writer(self.file)
        # Get the month of the publication time
time_tmp = string.atof(item['housePublishedTime'][0:10])
time_list = time.localtime(time_tmp)
if time_list[1] < 10:
times = '%d'%time_list[0]+'0'+'%d'%time_list[1]
else:
times = '%d'%time_list[0]+'%d'%time_list[1]
        # Format the item as a CSV row
price_chengjiao = item['housePrice']
price_guapai = item['housePrice']
house_name = item['houseName'].strip()
line = (times,house_name,item['houseCity'],price_chengjiao,price_guapai,item['houseArea'].strip(),item['houseAddress'],item['houseBaiduLatitude'],item['houseBaiduLongitude'],item['houseTitle'])
csvWriter.writerow(line)
return item
class zufangGanjiPipeline(object):
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
def process_item(self,item,spider):
if spider.name != 'zufangGanji':
return item
        # Check whether historical price data exists.
        # The dict get() method has to be used for this check, otherwise an error is raised:
        # if the key is missing, indexing the dict directly fails, while get() simply returns None.
if not item.get('housePrice'):
return item
        # Open the output file and the CSV writer
self.file = open('zufangfangGanji.csv','ab')
csvWriter = csv.writer(self.file)
        # Get the month of the publication time
time_tmp = string.atof(item['housePublishedTime'][0:10])
time_list = time.localtime(time_tmp)
if time_list[1] < 10:
times = '%d'%time_list[0]+'0'+'%d'%time_list[1]
else:
times = '%d'%time_list[0]+'%d'%time_list[1]
        # Format the item as a CSV row
price_chengjiao = item['housePrice']
price_guapai = item['housePrice']
house_name = item['houseName'].strip()
line = (times,house_name,item['houseCity'],price_chengjiao,price_guapai,item['houseArea'].strip(),item['houseAddress'],item['houseBaiduLatitude'],item['houseBaiduLongitude'],item['houseTitle'])
csvWriter.writerow(line)
return item
class cityGanjiPipeline(object):
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
def process_item(self,item,spider):
if spider.name != 'cityGanji':
return item
self.file = open('startURL_quanguo.txt','ab')
i = 1
while i < 71:
i_str = '%d'%i
line_1 = ' \''+item['houseCityURL'].encode('utf-8') + 'fang12/o' + i_str + '/\','
line_2 = ' \''+item['houseCityURL'].encode('utf-8') + 'fang5/o' + i_str + '/\','
line_3 = ' \''+item['houseCityURL'].encode('utf-8') + 'fang1/o' + i_str + '/\','
print >> self.file , line_1
print >> self.file , line_2
print >> self.file , line_3
i += 1
return item
```
#### File: SpiderLianjia/SpiderLianjia/pipelines.py
```python
import csv
import sys
import string
class SpiderlianjiaPipeline(object):
def process_item(self, item, spider):
return item
class xinfangLianjiaPipeline(object):
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
def process_item(self,item,spider):
        # Only handle items coming from the Lianjia new-home spider
if spider.name != 'xinfangLianjia':
return item
        # Check whether historical price data exists.
        # The dict get() method has to be used for this check, otherwise an error is raised:
        # if the key is missing, indexing the dict directly fails, while get() simply returns None.
if not item.get('houseHistoryPrice'):
return item
        # Open the output file and the CSV writer
self.file = open('xinfangLianjia.csv','ab')
csvWriter = csv.writer(self.file)
        # Turn the time and price data into lists
time_list = item['houseHistoryPrice']['time'].strip('[]').replace('"','').split(',')
price_chengjiao_list = item['houseHistoryPrice']['price_chengjiao'].strip('[]').split(',')
price_guapai_list = item['houseHistoryPrice']['price_guapai'].strip('[]').split(',')
        # Format the item as a CSV row
for house in time_list:
price_index = time_list.index(house)
price_chengjiao = price_chengjiao_list[price_index]
price_guapai = price_guapai_list[price_index]
line = (house,item['houseName'],item['houseCity'],price_chengjiao,price_guapai,item['houseAddress'],item['houseBaiduLatitude'],item['houseBaiduLongitude'],item['houseTitle'])
csvWriter.writerow(line)
return item
class ershoufangLianjiaPipeline(object):
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
def process_item(self,item,spider):
if spider.name != 'ershoufangLianjia':
return item
        # Check whether historical price data exists.
        # The dict get() method has to be used for this check, otherwise an error is raised:
        # if the key is missing, indexing the dict directly fails, while get() simply returns None.
if not item.get('houseHistoryPrice'):
return item
        # Open the output file and the CSV writer
self.file = open('ershoufangLianjia.csv','ab')
csvWriter = csv.writer(self.file)
        # Format the item as a CSV row
for house in item['houseHistoryPrice']['time']:
price_index = item['houseHistoryPrice']['time'].index(house)
price_chengjiao_tmp = string.atof(item['houseHistoryPrice']['price'][price_index])
price_guapai_tmp = string.atof(item['houseHistoryPrice']['price'][price_index])
house_area = string.atof(item['houseArea'])
price_chengjiao = price_chengjiao_tmp * house_area / 10000
price_guapai = price_guapai_tmp * house_area / 10000
if price_chengjiao == 0:
price_chengjiao = item['housePrice']
price_guapai = item['housePrice']
line = (house,item['houseName'],item['houseCity'],price_chengjiao,price_guapai,house_area,'N/A',item['houseBaiduLatitude'],item['houseBaiduLongitude'],item['houseTitle'])
csvWriter.writerow(line)
return item
class zufangLianjiaPipeline(object):
def __init__(self):
reload(sys)
sys.setdefaultencoding('utf-8')
def process_item(self,item,spider):
if spider.name != 'zufangLianjia':
return item
        # Check whether historical price data exists.
        # The dict get() method has to be used for this check, otherwise an error is raised:
        # if the key is missing, indexing the dict directly fails, while get() simply returns None.
if not item.get('houseHistoryPrice'):
return item
        # Open the output file and the CSV writer
self.file = open('zufangLianjia.csv','ab')
csvWriter = csv.writer(self.file)
        # Format the item as a CSV row
for house in item['houseHistoryPrice']['time']:
price_index = item['houseHistoryPrice']['time'].index(house)
price_chengjiao = string.atof(item['houseHistoryPrice']['price'][price_index])
price_guapai = string.atof(item['houseHistoryPrice']['price'][price_index])
house_area = string.atof(item['houseArea'])
if price_chengjiao == 0:
price_chengjiao = item['housePrice']
price_guapai = item['housePrice']
line = (house,item['houseName'],item['houseCity'],price_chengjiao,price_guapai,house_area,'N/A',item['houseBaiduLatitude'],item['houseBaiduLongitude'],item['houseTitle'])
csvWriter.writerow(line)
return item
``` |
{
"source": "JiminKung/Violet-LogMin",
"score": 3
} |
#### File: Violet-LogMin/widgets/boot_page.py
```python
import yaml
import tkinter as tk
from utils.utils import centered_display
from widgets.violet import Voilet
with open("violet-logmin.yaml", mode='r', encoding="utf-8") as f:
WIDGET_CONFIG = yaml.load(f, Loader=yaml.FullLoader)["widget"]
BOOT_PAGE_CONFIG = WIDGET_CONFIG["boot_page"]
WELCOME_LABEL_FRAME_CONFIG = WIDGET_CONFIG["welcome_label_frame"]
WELCOME_LABEL_CONFIG = WIDGET_CONFIG["welcome_label"]
CHOICE_FRAME_CONFIG = WIDGET_CONFIG["choice_frame"]
ENGLISH_BUTTON_CONFIG = WIDGET_CONFIG["english_button"]
CHINESE_BUTTON_CONFIG = WIDGET_CONFIG["chinese_button"]
class BootPage(tk.Tk):
def __init__(self):
super().__init__()
self.language = ""
self.context_box = {"boot_page": self}
self.title(BOOT_PAGE_CONFIG["title"])
self.iconbitmap(default=BOOT_PAGE_CONFIG["icon"])
self.window_width = BOOT_PAGE_CONFIG["window_width"]
self.window_height = BOOT_PAGE_CONFIG["window_height"]
if not BOOT_PAGE_CONFIG["resizeable"]:
self.resizable(0, 0)
self.welcome_label_frame = None
self.welcome_label = None
self.choice_frame = None
self.english_button = None
self.chinese_button = None
self.setup()
centered_display(self)
def setup(self):
self.welcome_label_frame = tk.LabelFrame(self, text=WELCOME_LABEL_FRAME_CONFIG["text"],
labelanchor=WELCOME_LABEL_FRAME_CONFIG["labelanchor"],
font=(WELCOME_LABEL_FRAME_CONFIG["font_family"],
WELCOME_LABEL_FRAME_CONFIG["font_size"],
WELCOME_LABEL_FRAME_CONFIG["font_weight"]))
self.welcome_label_frame.pack(pady=WELCOME_LABEL_FRAME_CONFIG["pady"])
self.welcome_label = tk.Label(self.welcome_label_frame, text=WELCOME_LABEL_CONFIG["text"],
wraplength=WELCOME_LABEL_CONFIG["wraplength"],
justify=WELCOME_LABEL_CONFIG["justify"],
font=(WELCOME_LABEL_CONFIG["font_family"],
WELCOME_LABEL_CONFIG["font_size"],
WELCOME_LABEL_CONFIG["font_weight"]))
self.welcome_label.pack(side=WELCOME_LABEL_CONFIG["side"],
padx=WELCOME_LABEL_CONFIG["padx"],
pady=WELCOME_LABEL_CONFIG["pady"])
self.choice_frame = tk.Frame(self)
self.choice_frame.pack(padx=CHOICE_FRAME_CONFIG["padx"],
pady=CHOICE_FRAME_CONFIG["pady"])
self.english_button = tk.Button(self.choice_frame, text=ENGLISH_BUTTON_CONFIG["text"],
font=(ENGLISH_BUTTON_CONFIG["font_family"],
ENGLISH_BUTTON_CONFIG["font_size"],
ENGLISH_BUTTON_CONFIG["font_weight"]),
command=self.choose_english)
self.english_button.pack(side=ENGLISH_BUTTON_CONFIG["side"],
padx=ENGLISH_BUTTON_CONFIG["padx"])
self.chinese_button = tk.Button(self.choice_frame, text=CHINESE_BUTTON_CONFIG["text"],
font=(CHINESE_BUTTON_CONFIG["font_family"],
CHINESE_BUTTON_CONFIG["font_size"],
CHINESE_BUTTON_CONFIG["font_weight"]),
command=self.choose_chinese)
self.chinese_button.pack(side=CHINESE_BUTTON_CONFIG["side"],
padx=CHINESE_BUTTON_CONFIG["padx"])
def choose_english(self):
self.language = "English"
self.boot_violet()
def choose_chinese(self):
self.language = "Chinese"
self.boot_violet()
def boot_violet(self):
violet = Voilet(self.context_box, self.language)
self.withdraw()
violet.mainloop()
def exit(self):
self.destroy()
```
#### File: Violet-LogMin/widgets/proscenium.py
```python
import yaml
import tkinter as tk
with open("violet-logmin.yaml", mode='r', encoding="utf-8") as f:
CONFIG = yaml.load(f, Loader=yaml.FullLoader)
WIDGET_CONFIG = CONFIG["widget"]
PROSCENIUM_FRAME_CONFIG = WIDGET_CONFIG["proscenium_frame"]
PROSCENIUM_TEXT_CONFIG = WIDGET_CONFIG["proscenium_text"]
PROSCENIUM_SCROLLBAR_CONFIG = WIDGET_CONFIG["proscenium_scrollbar"]
DIALOGUE_CONFIG = CONFIG["dialogue"]
LOGMIN_DIALOGUE_CONFIG = DIALOGUE_CONFIG["LogMin"]
VIOLET_DIALOGUE_CONFIG = DIALOGUE_CONFIG["Violet"]
VIOLET_LOGMIN_DIALOGUE_CONFIG = DIALOGUE_CONFIG["Violet-LogMin"]
class ProsceniumFrame(tk.LabelFrame):
def __init__(self, master, context_box, language):
self.context_box = context_box
self.context_box["proscenium_frame"] = self
self.language = language
super().__init__(master=master,
text=PROSCENIUM_FRAME_CONFIG[language]["text"],
labelanchor=PROSCENIUM_FRAME_CONFIG["labelanchor"],
font=(PROSCENIUM_FRAME_CONFIG["font_family"],
PROSCENIUM_FRAME_CONFIG["font_size"],
PROSCENIUM_FRAME_CONFIG["font_weight"]))
self.pack(side=PROSCENIUM_FRAME_CONFIG["side"],
padx=PROSCENIUM_FRAME_CONFIG["padx"],
pady=PROSCENIUM_FRAME_CONFIG["pady"],
fill="x")
self.proscenium_text = None
self.proscenium_scrollbar = None
self.setup()
def setup(self):
self.proscenium_text = tk.Text(self, wrap=PROSCENIUM_TEXT_CONFIG["wrap"],
font=(PROSCENIUM_TEXT_CONFIG["font_family"],
PROSCENIUM_TEXT_CONFIG["font_size"],
PROSCENIUM_TEXT_CONFIG["font_weight"]))
self.proscenium_scrollbar = tk.Scrollbar(self, orient=PROSCENIUM_SCROLLBAR_CONFIG["orient"])
self.proscenium_scrollbar.config(command=self.proscenium_text.yview)
self.proscenium_text.config(yscrollcommand=self.proscenium_scrollbar.set)
self.proscenium_scrollbar.pack(side=PROSCENIUM_SCROLLBAR_CONFIG["side"],
fill=PROSCENIUM_SCROLLBAR_CONFIG["fill"])
self.proscenium_text.pack(padx=PROSCENIUM_TEXT_CONFIG["padx"],
pady=PROSCENIUM_TEXT_CONFIG["pady"],
fill=PROSCENIUM_TEXT_CONFIG["fill"])
self.proscenium_text.see("end")
self.perform_opening_act()
self.list_cast()
def perform_opening_act(self):
welcome_lines = VIOLET_LOGMIN_DIALOGUE_CONFIG["role"] + \
VIOLET_LOGMIN_DIALOGUE_CONFIG[self.language]["welcome"] + "\n\n"
introduction_lines = VIOLET_LOGMIN_DIALOGUE_CONFIG["role"] + \
VIOLET_LOGMIN_DIALOGUE_CONFIG[self.language]["introduction"] + "\n\n"
website_lines = VIOLET_LOGMIN_DIALOGUE_CONFIG["role"] + \
VIOLET_LOGMIN_DIALOGUE_CONFIG[self.language]["website"] + "\n\n"
self.proscenium_text.insert("end", welcome_lines)
self.proscenium_text.insert("end", introduction_lines)
self.proscenium_text.insert("end", website_lines)
self.proscenium_text.config(state="disabled")
def list_cast(self):
self.proscenium_text.config(state="normal")
violet_introduction_lines = VIOLET_DIALOGUE_CONFIG["role"] + \
VIOLET_DIALOGUE_CONFIG[self.language]["introduction"] + "\n\n"
logmin_introduction_lines = LOGMIN_DIALOGUE_CONFIG["role"] + \
LOGMIN_DIALOGUE_CONFIG[self.language]["introduction"] + "\n\n"
tutorial_lines = VIOLET_LOGMIN_DIALOGUE_CONFIG["role"] + \
VIOLET_LOGMIN_DIALOGUE_CONFIG[self.language]["tutorial"] + "\n\n"
self.proscenium_text.insert("end", violet_introduction_lines)
self.proscenium_text.insert("end", logmin_introduction_lines)
self.proscenium_text.insert("end", tutorial_lines)
self.proscenium_text.config(state="disabled")
# self.proscenium_text.see("end")
def throw_miss_selecting_exception(self):
self.proscenium_text.config(state="normal")
violet_miss_selecting_lines = VIOLET_DIALOGUE_CONFIG["role"] + \
VIOLET_DIALOGUE_CONFIG[self.language]["miss_selecting"] + "\n\n"
self.proscenium_text.insert("end", violet_miss_selecting_lines)
self.proscenium_text.config(state="disabled")
self.proscenium_text.see("end")
def throw_empty_input_exception(self):
self.proscenium_text.config(state="normal")
violet_empty_input_lines = VIOLET_DIALOGUE_CONFIG["role"] + \
VIOLET_DIALOGUE_CONFIG[self.language]["empty_input"] + "\n\n"
self.proscenium_text.insert("end", violet_empty_input_lines)
self.proscenium_text.config(state="disabled")
self.proscenium_text.see("end")
def throw_log_unchange_exception(self):
self.proscenium_text.config(state="normal")
violet_log_unchange_lines = VIOLET_DIALOGUE_CONFIG["role"] + \
VIOLET_DIALOGUE_CONFIG[self.language]["log_unchange"] + "\n\n"
self.proscenium_text.insert("end", violet_log_unchange_lines)
self.proscenium_text.config(state="disabled")
self.proscenium_text.see("end")
def display_selected_member(self, member):
self.proscenium_text.config(state="normal")
violet_select_member_lines = VIOLET_DIALOGUE_CONFIG["role"] + \
VIOLET_DIALOGUE_CONFIG[self.language]["select_member"]\
.format(member["name"], member["address"]) + "\n\n"
self.proscenium_text.insert("end", violet_select_member_lines)
self.proscenium_text.config(state="disabled")
self.proscenium_text.see("end")
def display_send_permission(self):
self.proscenium_text.config(state="normal")
violet_send_permission_lines = VIOLET_DIALOGUE_CONFIG["role"] + \
VIOLET_DIALOGUE_CONFIG[self.language]["send_permission"] + "\n\n"
self.proscenium_text.insert("end", violet_send_permission_lines)
self.proscenium_text.config(state="disabled")
self.proscenium_text.see("end")
def display_send_cancel_instruction(self):
self.proscenium_text.config(state="normal")
violet_send_cancel_lines = VIOLET_DIALOGUE_CONFIG["role"] + \
VIOLET_DIALOGUE_CONFIG[self.language]["send_cancel"] + "\n\n"
self.proscenium_text.insert("end", violet_send_cancel_lines)
self.proscenium_text.config(state="disabled")
self.proscenium_text.see("end")
def display_send_finished(self, receiver):
full_name = receiver["sur_name"] + receiver["given_name"]
self.proscenium_text.config(state="normal")
logmin_send_finished_lines = LOGMIN_DIALOGUE_CONFIG["role"] + \
LOGMIN_DIALOGUE_CONFIG[self.language]["send_finished"] \
.format(full_name, receiver["address"]) + "\n\n"
self.proscenium_text.insert("end", logmin_send_finished_lines)
self.proscenium_text.config(state="disabled")
self.proscenium_text.see("end")
``` |
{
"source": "JiminLeeDev/BaekJoonPython",
"score": 4
} |
#### File: BaekJoonPython/problems/10870.py
```python
def FindFibonacciNumber(n):
if n <= 1:
return n
return FindFibonacciNumber(n - 1) + FindFibonacciNumber(n - 2)
def Num10870():
n = int(input())
print(FindFibonacciNumber(n))
Num10870()
```
#### File: BaekJoonPython/problems/10872.py
```python
def Get_Factorial(N, result):
if N == 0:
return result
else:
return Get_Factorial(N - 1, result * N)
def Num10872():
N = int(input())
result = Get_Factorial(N, 1)
print(result)
Num10872()
```
#### File: BaekJoonPython/problems/11729.py
```python
def move(n, a, b, c):
if n == 1:
print(str(a) + " " + str(c))
else:
move(n - 1, a, c, b)
print(str(a) + " " + str(c))
move(n - 1, b, a, c)
def Num11729():
n = int(input())
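    # Solving the Tower of Hanoi with n disks takes exactly 2**n - 1 moves.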
print(2 ** n -1)
move(n, 1, 2, 3)
Num11729()
```
#### File: BaekJoonPython/problems/2447.py
```python
def draw_patern(n, src):
result = []
if n == 1:
return src
for y in range(3):
for x in src:
if y == 1:
result.append(x + " " * len(x) + x)
else:
result.append(x * 3)
return draw_patern(n // 3, result)
def Num2447():
n = int(input())
result = draw_patern(n, ["*"])
for line in result:
print(line)
Num2447()
```
#### File: BaekJoonPython/problems/2581.py
```python
def Num2581():
M = int(input())
N = int(input())
minPrimeNum = 0
sumPrimeNum = 0
primeNums = []
for dividend in range(M, N+1):
if dividend == 1:
continue
primeNums.append(dividend)
sumPrimeNum += dividend
for divisor in range(2, dividend):
if dividend % divisor == 0:
primeNums.pop()
sumPrimeNum -= dividend
break
if len(primeNums) == 0:
print("-1")
else:
minPrimeNum = primeNums[0]
sumPrimeNum = sum(primeNums)
print(str(sumPrimeNum))
print(str(minPrimeNum))
Num2581()
```
#### File: BaekJoonPython/problems/3009.py
```python
def Num3009():
coordinate = [
[int(splitedInput) for splitedInput in input().split()] for i in range(3)
]
xSet = [coordinate[i][0] for i in range(3)]
ySet = [coordinate[i][1] for i in range(3)]
x = 0
y = 0
for idx in range(3):
if xSet.count(xSet[idx]) == 1:
x = xSet[idx]
if ySet.count(ySet[idx]) == 1:
y = ySet[idx]
print(str(x) + " " + str(y))
Num3009()
```
#### File: BaekJoonPython/problems/4153.py
```python
def Num4153():
while True:
testcase = input().split()
if testcase.count("0") == 3:
return
triangle = [int(t) for t in testcase]
triangle.sort()
if triangle[0] ** 2 + triangle[1] ** 2 == triangle[2] ** 2:
print("right")
else:
print("wrong")
Num4153()
```
#### File: BaekJoonPython/problems/7568.py
```python
def Num7568():
N = int(input())
bodyList = [[int(i) for i in input().split()] for i in range(N)]
result = ""
grades = []
for body1 in bodyList:
grade = 0
for body2 in bodyList:
if body1[0] < body2[0] and body1[1] < body2[1]:
grade += 1
grades.append(grade + 1)
for grade in grades:
result += str(grade) + " "
print(result)
Num7568()
```
#### File: BaekJoonPython/problems/9020.py
```python
def Num9020():
nums = [True] * 10000
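    # Sieve of Eratosthenes: mark every multiple of each number >= 2 as composite.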
for n in range(2, int(len(nums) ** 0.5) + 1):
if nums[n] == True:
for compositionNum in range(n + n, len(nums), n):
nums[compositionNum] = False
testcase = int(input())
while testcase == 0:
n = int(input())
numberOfN = [(i, n - i) for i in range(1, n // 2 + 1)]
goldbachNums = []
for i in numberOfN:
a = i[0]
b = i[1]
if nums[a] and nums[b]:
goldbachNums.append((a, b, (a - b) ** 2))
goldbachNums.sort(key=lambda goldbachNum: int(goldbachNum[2]))
result = str(goldbachNums[0][0]) + " " + str(goldbachNums[0][1])
print(result)
testcase -= 1
Num9020()
``` |
{
"source": "JiminLeeDev/JMBlog",
"score": 2
} |
#### File: JMBlog/views/main_views.py
```python
import flask
from flask.templating import render_template
bp = flask.Blueprint("main", __name__, url_prefix="/")
@bp.route("/")
def Index():
return render_template("index.html")
``` |
{
"source": "jimin-shin/oppia",
"score": 2
} |
#### File: jobs/decorators/audit_decorators.py
```python
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import inspect
from core.platform import models
from jobs.types import audit_errors
import python_utils
import apache_beam as beam
from apache_beam import typehints
_ALL_MODEL_CLASSES = frozenset(models.Registry.get_all_storage_model_classes())
_ALL_BASE_MODEL_CLASSES = frozenset(
models.Registry.get_storage_model_classes([models.NAMES.base_model]))
_MODEL_CLASSES_BY_BASE_CLASS = {
base_model_cls: frozenset({base_model_cls}).union(
cls for cls in _ALL_MODEL_CLASSES if issubclass(cls, base_model_cls))
for base_model_cls in _ALL_BASE_MODEL_CLASSES
}
class AuditsExisting(python_utils.OBJECT):
"""Decorator for registering DoFns that audit storage models.
DoFns registered by this decorator should assume that the models they
receive as input do not have `deleted=True`.
When decorating a DoFn that inherits from another, it overwrites the base
class. For example, ValidateExplorationModelId overwrites ValidateModelId if
and only if ValidateExplorationModelId inherits from ValidateModelId.
"""
_DO_FNS_BY_MODEL_KIND = collections.defaultdict(set)
def __init__(self, *model_cls_args):
"""Initializes the decorator to target the given models.
Args:
*model_cls_args: tuple(class). The models the decorator will target.
If an argument is a base class, all of its subclasses will be
targeted as well.
Raises:
TypeError. When a non-model type is provided.
"""
if not model_cls_args:
raise ValueError('Must provide at least one model')
self._model_classes = set()
for cls in model_cls_args:
if cls in _MODEL_CLASSES_BY_BASE_CLASS:
self._model_classes.update(_MODEL_CLASSES_BY_BASE_CLASS[cls])
elif cls in _ALL_MODEL_CLASSES:
self._model_classes.add(cls)
else:
raise TypeError(
'%r is not a model registered in core.platform' % cls)
def __call__(self, do_fn):
"""Decorator which registers the given DoFn to the targeted models.
This decorator also installs type constraints on the DoFn to guard it
from invalid argument types.
Args:
do_fn: DoFn. The DoFn to decorate.
Returns:
do_fn. The decorated DoFn.
Raises:
TypeError. When the input argument is not a DoFn.
"""
if not issubclass(do_fn, beam.DoFn):
raise TypeError('%r is not a subclass of DoFn' % do_fn)
# The "mro" (method resolution order) of a class is the list of types
# the class is derived from, including itself, in the order they are
# searched for methods and attributes.
# To learn more see: https://stackoverflow.com/a/2010732/4859885.
base_classes_of_do_fn = set(inspect.getmro(do_fn))
for cls in self._model_classes:
registered_do_fns = self._DO_FNS_BY_MODEL_KIND[cls.__name__]
if any(issubclass(r, do_fn) for r in registered_do_fns):
# Always keep the most-derived DoFn.
continue
registered_do_fns -= base_classes_of_do_fn
registered_do_fns.add(do_fn)
# Decorate the DoFn with type constraints that raise an error when
# arguments or return values have the wrong type.
with_input_types, with_output_types = (
typehints.with_input_types(typehints.Union[self._model_classes]),
typehints.with_output_types(audit_errors.BaseAuditError))
return with_input_types(with_output_types(do_fn))
@classmethod
def get_do_fns_for_model_kind(cls, model_kind):
"""Returns the list of DoFns registered to the given model kind.
Args:
model_kind: str. The kind/name of the model.
Returns:
list(DoFn). The DoFns registered to the model kind.
"""
return list(cls._DO_FNS_BY_MODEL_KIND[model_kind])
```
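A minimal registration sketch for the decorator above, assuming the imports used in the transforms file that follows (`beam`, `base_models`, and the module imported as `audit_decorators`); `ValidateNothing` is a hypothetical DoFn used only to show how registration and the `get_do_fns_for_model_kind` lookup fit together, not an actual Oppia validator.

```python
# Hypothetical example; not part of the Oppia codebase shown here.
@audit_decorators.AuditsExisting(base_models.BaseModel)
class ValidateNothing(beam.DoFn):
    """Trivial DoFn that never reports an audit error."""

    def process(self, input_model):
        # A real validator would yield audit_errors.* instances here.
        return []

# The DoFn is now registered for every storage model derived from BaseModel
# and can be looked up by model kind name:
do_fns = audit_decorators.AuditsExisting.get_do_fns_for_model_kind(
    base_models.BaseModel.__name__)
```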
#### File: jobs/transforms/base_model_audits.py
```python
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import re
from core.platform import models
import feconf
from jobs import jobs_utils
from jobs.decorators import audit_decorators
from jobs.types import audit_errors
import apache_beam as beam
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
MAX_CLOCK_SKEW_SECS = datetime.timedelta(seconds=1)
class ValidateDeletedModel(beam.DoFn):
"""DoFn to check whether models marked for deletion are stale.
Deleted models do not use a decorator for registration. This DoFn must be
called explicitly by runners.
"""
def process(self, input_model):
"""Yields audit errors that are discovered in the input model.
Args:
input_model: datastore_services.Model. Entity to validate.
Yields:
ModelExpiredError. An error class for expired models.
"""
model = jobs_utils.clone_model(input_model)
expiration_date = (
datetime.datetime.utcnow() -
feconf.PERIOD_TO_HARD_DELETE_MODELS_MARKED_AS_DELETED)
if model.last_updated < expiration_date:
yield audit_errors.ModelExpiredError(model)
@audit_decorators.AuditsExisting(base_models.BaseModel)
class ValidateBaseModelId(beam.DoFn):
"""DoFn to validate model ids.
Models with special ID checks should derive from this class and override the
MODEL_ID_REGEX attribute or the entire process() method, then decorate it to
target the appropriate model(s).
"""
MODEL_ID_REGEX = re.compile('^[A-Za-z0-9-_]{1,%s}$')
def process(self, input_model):
"""Function that defines how to process each element in a pipeline of
models.
Args:
input_model: datastore_services.Model. Entity to validate.
Yields:
ModelIdRegexError. An error class for models with invalid IDs.
"""
model = jobs_utils.clone_model(input_model)
regex = self.MODEL_ID_REGEX
if not regex.match(model.id):
yield audit_errors.ModelIdRegexError(model, regex.pattern)
@audit_decorators.AuditsExisting(base_models.BaseCommitLogEntryModel)
class ValidatePostCommitIsPrivate(beam.DoFn):
"""DoFn to check if post_commmit_status is private when
post_commit_is_private is true and vice-versa.
"""
def process(self, input_model):
"""Function validates that post_commit_is_private is true iff
post_commit_status is private
Args:
input_model: base_models.BaseCommitLogEntryModel.
Entity to validate.
Yields:
ModelInvalidCommitStatus. Error for commit_type validation.
"""
model = jobs_utils.clone_model(input_model)
expected_post_commit_is_private = (
model.post_commit_status == feconf.POST_COMMIT_STATUS_PRIVATE)
if model.post_commit_is_private != expected_post_commit_is_private:
yield audit_errors.InvalidCommitStatusError(model)
@audit_decorators.AuditsExisting(base_models.BaseModel)
class ValidateModelTimestamps(beam.DoFn):
"""DoFn to check whether created_on and last_updated timestamps are
valid.
"""
def process(self, input_model):
"""Function that defines how to process each element in a pipeline of
models.
Args:
input_model: datastore_services.Model. Entity to validate.
Yields:
ModelMutatedDuringJobError. Error for models mutated during the job.
InconsistentTimestampsError. Error for models with inconsistent
timestamps.
"""
model = jobs_utils.clone_model(input_model)
if model.created_on > (model.last_updated + MAX_CLOCK_SKEW_SECS):
yield audit_errors.InconsistentTimestampsError(model)
current_datetime = datetime.datetime.utcnow()
if (model.last_updated - MAX_CLOCK_SKEW_SECS) > current_datetime:
yield audit_errors.ModelMutatedDuringJobError(model)
``` |
{
"source": "jimiolaniyan/tracksuite",
"score": 2
} |
#### File: tracksuite/datasets/imagenetvid.py
```python
from torch.utils.data.dataset import Dataset
class ImageNetVIDDataSet(Dataset):
def __init__(self, path, mode='train'):
self.path = path
```
#### File: tracksuite/utils/metrics.py
```python
import numpy as np
def calculate_iou(bboxes1, bboxes2):
"""
This calculates the intersection over union of N bounding boxes
    in the form N x [left, top, right, bottom], e.g. for N=2:
>> bb = [[21,34,45,67], [67,120, 89, 190]]
:param bboxes1: np array: N x 4 ground truth bounding boxes
:param bboxes2: np array: N x 4 target bounding boxes
:return: iou: ratio between 0 and 1
"""
if len(bboxes1.shape) == 1:
bboxes1 = bboxes1.reshape(1, bboxes1.shape[0])
if len(bboxes2.shape) == 1:
bboxes2 = bboxes2.reshape(1, bboxes2.shape[0])
if bboxes1.shape[0] != bboxes2.shape[0] or bboxes1.shape[1] != bboxes2.shape[1]:
raise ValueError('Bounding boxes must be of equal dimension')
left_intersection = np.maximum(bboxes1[:, 0], bboxes2[:, 0])
top_intersection = np.maximum(bboxes1[:, 1], bboxes2[:, 1])
right_intersection = np.minimum(bboxes1[:, 2], bboxes2[:, 2])
bottom_intersection = np.minimum(bboxes1[:, 3], bboxes2[:, 3])
    # clamp to zero so non-overlapping boxes yield zero intersection area
    w_intersection = np.maximum(0, right_intersection - left_intersection)
    h_intersection = np.maximum(0, bottom_intersection - top_intersection)
    intersection_area = w_intersection * h_intersection
bboxes1_area = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
bboxes2_area = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
union_area = bboxes1_area + bboxes2_area - intersection_area
iou = np.clip(intersection_area/union_area, 0, 1)
return iou
``` |
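A quick usage sketch for `calculate_iou` above, with made-up box coordinates; the second pair relies on the non-overlap clamp added above to come out as zero.

```python
import numpy as np

# calculate_iou from tracksuite/utils/metrics.py above is assumed to be in scope.
gt = np.array([[0, 0, 10, 10], [20, 20, 30, 30]])
pred = np.array([[0, 0, 10, 5], [40, 40, 50, 50]])

print(calculate_iou(gt, pred))  # [0.5 0.]
# first pair:  intersection 10*5 = 50, union 100 + 50 - 50 = 100 -> 0.5
# second pair: boxes do not overlap -> 0.0
```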
{
"source": "jimisantana/ai_gpc",
"score": 2
} |
#### File: allowed/gpc/Gpc.py
```python
import re
from os import system, path
from threading import Thread
import pandas as pd
import numpy as np
from traceback import format_exc
from datetime import datetime
import pickle
from transformers import BertTokenizer, BertForSequenceClassification
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
PATH_TMP = './files_temp'
COL_TEXT_SEP = '__sys_gpc_text_sep__'
PAD_MAX_TOKENS = 25
BATCH_SIZE_AKA_MAX_ROWS_PER_GUESS_TO_FIT_GPU_MEM = int(4e3)
# BATCH_SIZE_AKA_MAX_ROWS_PER_GUESS_TO_FIT_GPU_MEM = int(1e3) # small batches
PRINT_EVERY_N = int(1e4)
# PRINT_EVERY_N = int(1) # print every time
MAX_TEST_ROWS = int(2e4) # TODO TEMP TEST DEBUG
path2here = '.'
dict_label_iid_pkl = f'{path2here}/model_save/dict_label_iid.pkl'
dict_label_t_pkl = f'{path2here}/model_save/dict_label_t.pkl'
dict_label_iid: dict = None
dict_label_t: dict = None
tokenizer: BertTokenizer = None
model: BertForSequenceClassification = None
# endregion globals
"""
originating from this tutorial - the test/eval inference part
http://mccormickml.com/2019/07/22/BERT-fine-tuning/
"""
# region cuda
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# TODO - find out how to use all GPUs
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
# endregion cuda
# region load model
def load_model_n_stuff():
print('ai_gpc loading model and tokenizer...')
global dict_label_iid, dict_label_t
global tokenizer, model
if path.exists(dict_label_iid_pkl):
with open(dict_label_iid_pkl, 'rb') as f:
dict_label_iid = pickle.load(f)
if path.exists(dict_label_t_pkl):
with open(dict_label_t_pkl, 'rb') as f:
dict_label_t = pickle.load(f)
tokenizer = BertTokenizer.from_pretrained(f'{path2here}/model_save')
model = BertForSequenceClassification.from_pretrained(f'{path2here}/model_save')
    print('setting model to', device)
    model.to(device)  # GPU or CPU
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
load_model_n_stuff()
# endregion load model
r_dang_chars = re.compile(r'[{}"]+')  # "dang" is short for "dangerous"
# region M A I N MAIN function to be called from flask
def gpc(name: str = 'file_name_unique_without_extension', top_cat: int = -1, top_cat_t: str = None):
def fn(name2: str = 'file_name_unique_without_extension'):
try:
system(f'cd {PATH_TMP}; tar -zxvf {name2}.feather.tar.gz')
ppath = f'{PATH_TMP}/{name2}.feather'
df = pd.read_feather(ppath)
print(f'original len {len(df)} titles')
if len(df) > MAX_TEST_ROWS:
df = df.sample(n=MAX_TEST_ROWS)
print(f'doing inference on {len(df)} titles')
with Gpc(df, top_cat, top_cat_t) as obj:
df = obj.prepare_and_guess()
obj.dump('end gpc instance... - we should be DONE ... maybe')
print('end gpc static... - we should be DONE')
except:
err = format_exc()
print(err)
# async
t = Thread(target=fn, args=(name,))
t.start()
# TODO TEMP DEBUG
t.join()
# endregion M A I N MAIN function to be called from flask
# =============
# MAIN method in Gpc class is: prepare_and_guess()
# =============
class Gpc:
def __init__(self, df: pd.DataFrame = None, top_cat: int = -1, top_cat_t: str = None):
super().__init__()
self.df = df
self.top_cat = top_cat
self.top_cat_t = top_cat_t
self.column = COL_TEXT_SEP
self.input_ids_test = []
self.labels_test = []
self.attention_masks_test = []
self.texts_test = []
self.test_dataloader: DataLoader = None
self.d: datetime = datetime.now()
def __del__(self):
try:
del self.df
del self.input_ids_test
del self.labels_test
del self.attention_masks_test
del self.texts_test
del self.test_dataloader
except:
format_exc()
def __enter__(self):
return self
def __exit__(self, ttype, value, traceback):
self.__del__()
# =============
# MAIN
# =============
def prepare_and_guess(self) -> pd.DataFrame:
self.texts_test = self.df[self.column].tolist()
self.labels_test = [0] * len(self.texts_test) # dummy
self.input_ids_test, self.attention_masks_test, self.labels_test = self.encode_stuff()
test_dataset = TensorDataset(self.input_ids_test, self.attention_masks_test, self.labels_test)
self.test_dataloader = DataLoader(
test_dataset,
sampler=SequentialSampler(test_dataset),
# batch_size=len(test_dataset) # AKA - single batch - nope! no mem for that
batch_size=BATCH_SIZE_AKA_MAX_ROWS_PER_GUESS_TO_FIT_GPU_MEM,
# tests
num_workers=8,
# maybe this is the culprit as suggested by user12750353 in stackoverflow
# pin_memory=True
pin_memory=False
)
# =======
# call MAIN - that's what we are here for - the main GPU thing
# =======
# self.dump('start predictions...')
predictions = self.guess()
# self.dump('end predictions...')
print('pytorch tensor shape is', predictions.shape)
label_indices = torch.argmax(predictions, dim=1)
self.dump('start loop df append...')
df = []
for i, o in enumerate(self.texts_test):
# t, iid, s = self.find_with_top_level(predictions[i])
label_index = label_indices[i]
t = dict_label_t.get(label_index)
# s = predictions[label_index] # A! A! A! getting a number from the GPU to real CPU word is a shit load of time! Nope!
# df.append(
# {
# 'text': o,
# 't': dict_label_t.get(label_index),
# 'iid': dict_label_iid.get(label_index),
# 's': predictions[label_index]
# }
# )
self.dump('end loop df append...')
self.dump('start df...')
df = pd.DataFrame(df)
self.dump('end df...')
return df
# GPU
def guess(self):
# =======
# MAIN - that's what we are here for - the main GPU thing
# =======
print()
print("that's what we are here for - the main GPU inference thing...")
print()
# predictions, true_labels = [], []
predictions = None
# torch.cuda.empty_cache()
for i, batch in enumerate(self.test_dataloader):
print()
self.dump('start empty cache...', i, 1)
# torch.cuda.empty_cache()
self.dump('end empty cache...', i, 1)
self.dump('start to device...', i, 1)
# region test shuffle
# if not i:
# batch = tuple(t.to(device) for t in batch) # to GPU when gpu (or CPU otherwise)
# else:
# for t in batch:
# t[...] = t[torch.randperm(t.shape[0], device=t.device)]
# endregion test shuffle
# region to device, where first batch is fast, next ones are slow
# there are just 3 tensors in each batch: input_ids, input_mask, labels
batch = tuple(t.to(device) for t in batch[:2]) # to GPU when gpu (or CPU otherwise)
            # batch = list(t.to(device) for t in batch) # no real improvement
            # batch = batch.to(device) # nope - this is just a list
            # endregion to device, where first batch is fast, next ones are slow
self.dump('end to device...', i, 1)
# , b_labels - labels are not used
b_input_ids, b_input_mask = batch
self.dump('start outputs...', i, 1)
# torch.cuda.empty_cache()
with torch.no_grad():
# torch.cuda.empty_cache()
outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
self.dump('end outputs...', i, 1)
self.dump('logits...', i, 1)
logits = outputs[0]
self.dump('start detach...', i, 1)
logits = logits.detach()
self.dump('end detach...', i, 1)
del outputs
predictions = logits if predictions is None else torch.cat((predictions, logits), 0)
del logits
del b_input_ids
del b_input_mask
# del b_labels
for o in batch:
del o
del batch
return predictions
def find_with_top_level(self, predictions: torch.tensor) -> (str, int, float):
if self.top_cat < 0:
# # label_index = np.argmax(predictions)
# label_index = torch.argmax(predictions)
# return dict_label_t.get(label_index), dict_label_iid.get(label_index), predictions[label_index]
return dict_label_t.get(0), dict_label_iid.get(0), 0
t = None
iid = None
score = None
# # for label_index in np.argsort(predictions)[::-1]:
# for label_index in torch.argsort(predictions)[::-1]:
#
# t = dict_label_t.get(label_index)
#
# if self.top_cat_t in t:
# iid = dict_label_iid.get(label_index)
# score = predictions[label_index]
# break
# else:
# t = None
if not t:
t = self.top_cat_t
iid = self.top_cat
score = 0.
return t, iid, score
# just on CPU
def encode_stuff(self) -> (list, list, list):
# just on cpu - TODO - make on multiple cpu's
print('disregard this - this runs on CPU and should be distributed along multi CPUs')
print()
for i, sent in enumerate(self.texts_test):
if not i % PRINT_EVERY_N:
print(f'encode_stuff {i}')
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens=True, # Add '[CLS]' and '[SEP]'
max_length=PAD_MAX_TOKENS, # Pad & truncate all sentences. was 64
truncation=True,
padding='max_length',
return_attention_mask=True, # Construct attn. masks.
return_tensors='pt', # Return pytorch tensors.
)
self.input_ids_test.append(encoded_dict['input_ids'])
self.attention_masks_test.append(encoded_dict['attention_mask'])
return torch.cat(self.input_ids_test, dim=0), \
torch.cat(self.attention_masks_test, dim=0), \
torch.tensor(self.labels_test)
# --- measure and print times
    def dump(self, pref='blah', i=-1, print_every_n=None):
        if not print_every_n:
            print_every_n = PRINT_EVERY_N
        if i > -1 and not i % print_every_n:
print(pref, (datetime.now() - self.d).total_seconds())
self.d = datetime.now()
``` |
{
"source": "jimishapatel/statistic",
"score": 3
} |
#### File: statistic/Statistics/samplestand.py
```python
from Calculator.subtraction import subtraction
from Calculator.division import division
from Statistics.sampledata import sampledata
from Calculator.squareroot import squareroot
from Calculator.square import square
from Statistics.mean import mean
from Calculator.addition import addition
def samplestand(data, sample_size):
    # sample standard deviation: sqrt(sum((x - x_bar)^2) / (n - 1))
    sample = sampledata(data, sample_size)
    n = len(sample)
    x_bar = mean(sample)
    sum_squared_dev = 0
    for x in sample:
        dev = subtraction(x, x_bar)
        sum_squared_dev = addition(sum_squared_dev, square(dev))
    result = division(sum_squared_dev, subtraction(n, 1))
    return squareroot(result)
```
#### File: statistic/Statistics/zscore.py
```python
from Statistics.mean import mean
from Statistics.samplestand import samplestand
from Calculator.subtraction import subtraction
from Calculator.division import division
def zscore(data):
    x = 64  # observation whose z-score is computed
    u = mean(data)
    # samplestand needs a sample size; use the full data set here
    sample_sd = samplestand(data, len(data))
    y = subtraction(x, u)
    # z = (x - mean) / sample standard deviation
    return division(y, sample_sd)
``` |
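The corrected `samplestand` above composes the usual sample standard deviation formula out of the Calculator helpers. As a quick, self-contained sanity check (written with plain arithmetic instead of the Calculator package, whose helper signatures are assumed to mirror ordinary operators), the same formula can be spelled out directly and compared with `statistics.stdev`:

```python
import statistics

data = [2, 4, 4, 4, 5, 5, 7, 9]

# The formula samplestand is meant to compose from the helpers:
n = len(data)
x_bar = sum(data) / n
sample_sd = (sum((x - x_bar) ** 2 for x in data) / (n - 1)) ** 0.5

print(sample_sd)               # ~2.1381
print(statistics.stdev(data))  # same value
```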
{
"source": "jimist/midi_genetic",
"score": 3
} |
#### File: jimist/midi_genetic/genetic.py
```python
import random
from sklearn.externals import joblib
import numpy as np
class Genetic:
miss_start = -1
miss_end = -1
genome_length = 0
notes_before = []
notes_after = []
mutation_rate = 0.1
guesser = None
total_best_score = 1000000
best_population = []
def __init__(self, song):
for index, note in enumerate(song):
if note == 0 and self.miss_start == -1:
self.miss_start = index
if note != 0 and self.miss_start != -1 and self.miss_end == -1:
self.miss_end = index
self.genome_length = self.miss_end - self.miss_start
self.notes_before = song[self.miss_start - 20:self.miss_start]
self.notes_after = song[self.miss_end:self.miss_end + 20]
self.max = max(song)
for index, note in enumerate(song):
if note == 0:
song[index] = self.max
self.min = min(song)
self.guesser = joblib.load("guesser")
def make_population(self, count):
populations = []
for i in range(count):
temp_list = [random.randint(self.min, self.max) for iter in range(self.genome_length)]
populations.append(temp_list)
# print("generated {} populations with length of {}!".format(count, self.genome_length))
return populations
def score_population(self, pop):
pop = self.notes_before[-5:] + pop + self.notes_after[:5]
total_score = 0
reducers = []
test_data = []
for index, note in enumerate(pop[5:-5]):
real_index = index + 5
test_data_temp = pop[real_index - 5: real_index] + pop[real_index + 1:real_index + 5]
# print(test_data_temp)
reducer = test_data_temp[0]
reducers.append(reducer)
for ii, nn in enumerate(test_data_temp):
test_data_temp[ii] = nn - reducer
test_data.append(test_data_temp)
test_results = self.guesser.predict(np.array(test_data))
for ti, tv in enumerate(test_results):
test_results[ti] = tv + reducers[ti]
for index, note in enumerate(pop[5:-5]):
guessed_note = test_results[index] # find out what was supposed to be here
temp_score = abs(guessed_note - note)
# print(temp_score)
total_score += temp_score
# print(total_score)
return total_score
def mutate_population(self, pop):
rand_position = random.randint(0, self.genome_length - 1)
random_note = random.randint(self.min, self.max)
# print("mutating a population at position {} with the note {}!".format(rand_position, random_note))
pop[rand_position] = random_note
# print(pop[rand_position])
return pop
def mix_genomes(self, g1, g2):
        # integer division so random.randint receives int bounds
        pivot1 = random.randint(0, self.genome_length // 2)
        pivot2 = random.randint(pivot1 + (self.genome_length // 4), self.genome_length)
rg1 = g1.copy() # return genome 1
rg2 = g2.copy() # return genome 2
rg1[pivot1:pivot2] = g2[pivot1: pivot2]
rg2[pivot1:pivot2] = g1[pivot1: pivot2]
# print("-----------")
# print(pivot1, pivot2)
# print(g1)
# print(rg1)
# print(g2)
# print(rg2)
mutation_rand = random.uniform(0.00, 10.00)
if mutation_rand <= (self.mutation_rate * 10):
rg1 = self.mutate_population(rg1)
# print("mutate rg1!")
mutation_rand = random.uniform(0.00, 10.00)
if mutation_rand <= (self.mutation_rate * 10):
rg2 = self.mutate_population(rg2)
# print("mutate rg2!")
return rg1, rg2
def run(self, iterations_count=5):
populations_count = 100
populations = self.make_population(populations_count)
scores = [0] * populations_count
i = 0
while True:
# score each population
min_score = 10000
for j, pop in enumerate(populations):
scores[j] = self.score_population(populations[j])
if scores[j] < self.total_best_score:
self.total_best_score = scores[j]
self.best_population = pop
if scores[j] < min_score:
min_score = scores[j]
# print("best score was {}".format(min_score))
# sort populations by score. since the lower the score the better population is, first 30 are the best
populations = [x for _, x in sorted(zip(scores, populations))]
# after sorting populations break if the iteration counts are finished
if i > iterations_count:
break
new_population = [0] * populations_count
# mix the first 30 for the first 60 of populations
for index, pop in enumerate(populations[:int(0.3 * populations_count)]):
couple_pop = populations[int(0.6 * populations_count) - index]
# print(index, int(0.6 * populations_count) - index)
new_population[index * 2], new_population[(index * 2) + 1] = self.mix_genomes(pop, couple_pop)
# make 40 new genomes for the least 40 populations
new_population[int(0.6 * populations_count):] = self.make_population(int(0.4 * populations_count))
populations = new_population
i += 1
print("best score in total was {}".format(self.total_best_score))
print(self.best_population)
return self.best_population
``` |
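A standalone sketch of the two-point crossover that `mix_genomes` performs, using plain lists and a fixed seed so it runs without the `guesser` model file the class loads; the pivot computation mirrors the integer-division fix applied above.

```python
import random

random.seed(0)  # reproducible demo

g1 = [60, 62, 64, 65, 67, 69, 71, 72]
g2 = [48, 50, 52, 53, 55, 57, 59, 60]
length = len(g1)

pivot1 = random.randint(0, length // 2)
pivot2 = random.randint(pivot1 + (length // 4), length)

child1, child2 = g1.copy(), g2.copy()
child1[pivot1:pivot2] = g2[pivot1:pivot2]
child2[pivot1:pivot2] = g1[pivot1:pivot2]

print(pivot1, pivot2)
print(child1)  # g1 with the middle slice swapped in from g2
print(child2)  # g2 with the middle slice swapped in from g1
```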
{
"source": "jimit105/leetcode-submissions",
"score": 3
} |
#### File: problems/arranging_coins/solution.py
```python
class Solution:
def arrangeCoins(self, n: int) -> int:
return int((2*n+0.25)**0.5 - 0.5)
```
#### File: problems/backspace_string_compare/solution.py
```python
class Solution:
def backspaceCompare(self, s: str, t: str) -> bool:
def build(st):
ans = []
for c in st:
if c != '#':
ans.append(c)
elif ans:
ans.pop()
return ''.join(ans)
return build(s) == build(t)
```
#### File: problems/best_sightseeing_pair/solution.py
```python
import math
class Solution:
def maxScoreSightseeingPair(self, values: List[int]) -> int:
max_end_right = values[0]
res = -math.inf
for i in range(1, len(values)):
max_end_right = max(max_end_right, values[i - 1] + i - 1)
res = max(res, max_end_right + values[i] - i)
return res
```
#### File: problems/can_place_flowers/solution.py
```python
class Solution:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
l = len(flowerbed)
flowerbed = [0] + flowerbed + [0]
for i in range(1, l+1):
if flowerbed[i] == flowerbed[i-1] == flowerbed[i+1] == 0:
flowerbed[i] = 1
n -= 1
if n <= 0:
return True
        # n may already have been 0 on entry, with no placement needed
        return n <= 0
```
#### File: problems/check_if_a_number_is_majority_element_in_a_sorted_array/solution.py
```python
import bisect
class Solution:
def isMajorityElement(self, nums: List[int], target: int) -> bool:
half = len(nums) // 2
left = bisect.bisect_left(nums, target)
if left > half:
return False
right = bisect.bisect_right(nums, target, lo=left+half, hi=min(left+half+1, len(nums)))
return (right - left) > half
```
#### File: problems/combination_sum/solution.py
```python
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
def backtrack(remain, comb, next_start):
if remain == 0:
results.append(comb.copy())
return
elif remain < 0:
return
for i in range(next_start, len(candidates)):
comb.append(candidates[i])
backtrack(remain - candidates[i], comb, i)
comb.pop()
results = []
backtrack(target, [], 0)
return results
```
#### File: problems/count_equal_and_divisible_pairs_in_an_array/solution.py
```python
class Solution:
def countPairs(self, nums: List[int], k: int) -> int:
n = len(nums)
count = 0
for i in range(n):
for j in range(i+1, n):
if nums[i] == nums[j] and (i * j) % k == 0:
count += 1
return count
```
#### File: problems/count_integers_with_even_digit_sum/solution.py
```python
class Solution:
def sum_digits(self, num):
return 0 if num == 0 else int(num % 10) + self.sum_digits(num // 10)
def countEven(self, num: int) -> int:
count = 0
for i in range(1, num + 1):
if self.sum_digits(i) % 2 == 0:
count += 1
return count
```
#### File: problems/diameter_of_binary_tree/solution.py
```python
class Solution:
def diameterOfBinaryTree(self, root: Optional[TreeNode]) -> int:
diameter = 0
def find_longest_path(node):
nonlocal diameter
if not node:
return 0
left_path = find_longest_path(node.left)
right_path = find_longest_path(node.right)
diameter = max(diameter, left_path+right_path)
return max(left_path, right_path)+1
find_longest_path(root)
return diameter
```
#### File: problems/duplicate_zeros/solution.py
```python
class Solution(object):
def duplicateZeros(self, arr):
"""
:type arr: List[int]
:rtype: None Do not return anything, modify arr in-place instead.
"""
i = 0
while i in range(len(arr)):
if arr[i] == 0:
arr.insert(i+1, 0)
i += 2
arr.pop()
else:
i += 1
return None
```
#### File: problems/group_anagrams/solution.py
```python
from collections import defaultdict
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
hashmap = defaultdict(list)
for s in strs:
hashmap[tuple(sorted(s))].append(s)
return hashmap.values()
```
#### File: problems/largest_number_at_least_twice_of_others/solution.py
```python
class Solution:
def dominantIndex(self, nums: List[int]) -> int:
max1, max2 = 0, 0
max1_idx = 0
for i, n in enumerate(nums):
if n >= max1:
max2 = max1
max1 = n
max1_idx = i
elif n > max2:
max2 = n
return max1_idx if max1 >= 2 * max2 else -1
```
#### File: problems/lru_cache/solution.py
```python
from collections import OrderedDict
class LRUCache(OrderedDict):
def __init__(self, capacity: int):
self.capacity = capacity
def get(self, key: int) -> int:
if key not in self:
return -1
self.move_to_end(key)
return self[key]
def put(self, key: int, value: int) -> None:
if key in self:
self.move_to_end(key)
self[key] = value
if len(self) > self.capacity:
self.popitem(last = False)
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
```
#### File: problems/minimize_deviation_in_array/solution.py
```python
import heapq
import math
class Solution:
def minimumDeviation(self, nums: List[int]) -> int:
# heapq is min-heap
# So use negative values to mimic max-heap
evens = []
minimum = math.inf
for num in nums:
if num % 2 == 0:
evens.append(-num)
minimum = min(minimum, num)
else:
evens.append(-num*2)
minimum = min(minimum, num * 2)
heapq.heapify(evens)
min_deviation = math.inf
while evens:
current_val = -heapq.heappop(evens)
min_deviation = min(min_deviation, current_val - minimum)
if current_val % 2 == 0:
minimum = min(minimum, current_val // 2)
heapq.heappush(evens, -current_val // 2)
else:
# if the maximum is odd number, break
break
return min_deviation
```
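A tiny demonstration of the negation trick noted in the comments above: `heapq` only provides a min-heap, so pushing negated values makes the smallest stored element correspond to the largest original value.

```python
import heapq

values = [5, 1, 9, 3]
max_heap = [-v for v in values]
heapq.heapify(max_heap)

largest = -heapq.heappop(max_heap)
print(largest)  # 9
```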
#### File: problems/minimum_moves_to_reach_target_score/solution.py
```python
class Solution:
def minMoves(self, target: int, maxDoubles: int) -> int:
moves = 0
if 1 << maxDoubles == target:
return maxDoubles
while target > 1:
if target % 2 == 0 and maxDoubles:
target = target >> 1
moves += 1
maxDoubles -= 1
elif not maxDoubles:
diff = target - 1
target -= diff
moves += diff
else:
target -= 1
moves += 1
print(target, moves)
return moves
```
#### File: problems/n-queens_ii/solution.py
```python
class Solution:
def totalNQueens(self, n: int) -> int:
def backtrack(row, diagonals, anti_diagonals, cols):
# Base Case - When all queens have been placed
if row == n:
return 1
solutions = 0
for col in range(n):
curr_diag = row - col
curr_anti_diag = row + col
# If the queen cannot be placed
if col in cols or curr_diag in diagonals or curr_anti_diag in anti_diagonals:
continue
# Place the queen
cols.add(col)
diagonals.add(curr_diag)
anti_diagonals.add(curr_anti_diag)
# Move to the next row
solutions += backtrack(row + 1, diagonals, anti_diagonals, cols)
# All valid paths have been explored in the above function call, so remove the queen
cols.remove(col)
diagonals.remove(curr_diag)
anti_diagonals.remove(curr_anti_diag)
return solutions
return backtrack(0, set(), set(), set())
```
#### File: problems/palindrome_partitioning/solution.py
```python
class Solution:
def partition(self, s: str) -> List[List[str]]:
res = []
def isPalindrome(start_idx, end_idx):
while start_idx <= end_idx:
if s[start_idx] != s[end_idx]:
return False
start_idx += 1
end_idx -=1
return True
def dfs(start_idx, path):
if start_idx >= len(s):
res.append(path)
for i in range(len(s) - start_idx):
if isPalindrome(start_idx, start_idx+i):
dfs(start_idx+i+1, path + [s[start_idx : start_idx+i+1]])
dfs(0, [])
return res
```
#### File: problems/plus_one_linked_list/solution.py
```python
class Solution:
def plusOne(self, head: ListNode) -> ListNode:
# if input is all 9s e.g. 99, then we need additional node
sentinel = ListNode(val = 0, next = head)
not_nine = sentinel
# find the rightmost not-nine
while head:
if head.val != 9:
not_nine = head
head = head.next
# add 1 to the value and move to next node
not_nine.val += 1
not_nine = not_nine.next
# set following 9s to zero e.g. 1299 -> 1300
while not_nine:
not_nine.val = 0
not_nine = not_nine.next
return sentinel if sentinel.val else sentinel.next
```
#### File: problems/product_of_array_except_self/solution.py
```python
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
n = len(nums)
answer = [0] * n
answer[0] = 1
for i in range(1, n):
answer[i] = answer[i-1] * nums[i-1]
right = 1
for i in range(n-1, -1, -1):
answer[i] = answer[i] * right
right *= nums[i]
return answer
# Time: O(n) - traversing the elements two times independently
# Space: O(1) - one additional variable - right
```
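A short, self-contained trace of the prefix/suffix-product pass used above, written out with plain loops for a concrete input so the two passes are visible:

```python
nums = [1, 2, 3, 4]
n = len(nums)

answer = [1] * n
for i in range(1, n):                 # prefix products
    answer[i] = answer[i - 1] * nums[i - 1]
# answer == [1, 1, 2, 6]

right = 1
for i in range(n - 1, -1, -1):        # fold in suffix products
    answer[i] *= right
    right *= nums[i]

print(answer)  # [24, 12, 8, 6]
```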
#### File: problems/remove_duplicates_from_sorted_list/solution.py
```python
class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
if not head:
return head
current = head
while current is not None and current.next is not None:
if current.next.val == current.val:
current.next = current.next.next
else:
current = current.next
return head
```
#### File: problems/reverse_integer/solution.py
```python
class Solution:
def reverse(self, x: int) -> int:
rev_x = int(str(abs(x))[::-1])
return_x = rev_x if x > 0 else -1*rev_x
return return_x if -2 ** 31 <= return_x <= 2 ** 31 - 1 else 0
```
#### File: problems/roman_to_integer/solution.py
```python
values = {
"I": 1,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000,
"IV": 4,
"IX": 9,
"XL": 40,
"XC": 90,
"CD": 400,
"CM": 900
}
class Solution:
def romanToInt(self, s: str) -> int:
total = 0
i = 0
while i < len(s):
# subtractive case
if i < len(s) - 1 and s[i : i + 2] in values:
total += values[s[i : i + 2]]
i += 2
else:
total += values[s[i]]
i += 1
return total
```
#### File: problems/single_number/solution.py
```python
from collections import Counter
class Solution:
def singleNumber(self, nums: List[int]) -> int:
a = 0
for i in nums:
a ^= i
return a
```
#### File: problems/sort_characters_by_frequency/solution.py
```python
from collections import Counter
class Solution:
def frequencySort(self, s: str) -> str:
count = Counter(s)
result = ''
for k, v in count.most_common():
result += k*v
return result
```
#### File: problems/sort_colors/solution.py
```python
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
nums.sort()
```
#### File: problems/squares_of_a_sorted_array/solution.py
```python
class Solution:
def sortedSquares(self, nums: List[int]) -> List[int]:
n = len(nums)
left, right = 0, n-1
result = [0] * n
for i in range(n-1, -1, -1):
if abs(nums[left]) < abs(nums[right]):
square = nums[right]
right -= 1
else:
square = nums[left]
left += 1
result[i] = square ** 2
return result
```
#### File: problems/subarray_sum_equals_k/solution.py
```python
class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
count = 0
total = 0
hashmap = {} # {sum_i : no. of occurrences of sum_i}
hashmap[0] = 1
for i in range(len(nums)):
total += nums[i]
if (total - k) in hashmap.keys():
count += hashmap.get(total - k)
hashmap[total] = hashmap.get(total, 0) + 1
return count
```
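A worked, self-contained trace of the prefix-sum/hashmap idea used above: at each element, the number of earlier prefix sums equal to `total - k` counts the subarrays ending there that sum to `k`.

```python
from collections import defaultdict

nums, k = [1, 2, 3, -2, 2], 3

count, total = 0, 0
seen = defaultdict(int)
seen[0] = 1                      # empty prefix

for x in nums:
    total += x
    count += seen[total - k]     # subarrays ending here that sum to k
    seen[total] += 1

print(count)  # 4 -> [1,2], [3], [2,3,-2], [3,-2,2]
```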
#### File: problems/subtract_the_product_and_sum_of_digits_of_an_integer/solution.py
```python
class Solution:
def subtractProductAndSum(self, n: int) -> int:
prod = 1
sum_ = 0
while n > 0:
last_digit = n % 10
prod *= last_digit
sum_ += last_digit
n //= 10
return prod - sum_
```
#### File: problems/triangle/solution.py
```python
from functools import lru_cache
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
@lru_cache(maxsize = None)
def min_path(row, col):
path = triangle[row][col]
if row < len(triangle) - 1:
path += min(min_path(row + 1, col), min_path(row + 1, col + 1))
return path
return min_path(0, 0)
```
#### File: problems/word_pattern/solution.py
```python
class Solution:
def wordPattern(self, pattern: str, s: str) -> bool:
map_index = {}
words = s.split()
if len(pattern) != len(words):
return False
for i in range(len(words)):
c = pattern[i]
w = words[i]
char_key = 'char_{}'.format(c)
word_key = 'word_{}'.format(w)
if char_key not in map_index:
map_index[char_key] = i
if word_key not in map_index:
map_index[word_key] = i
if map_index[char_key] != map_index[word_key]:
return False
return True
``` |
{
"source": "jimit23/SPLP_expts",
"score": 3
} |
#### File: jimit23/SPLP_expts/bio_val.py
```python
import numpy as np
import networkx as nx
import xlsxwriter
import math
def read_gt(gt_file, nodes):
with open(('final_datasets/complex_datasets/gold_standard/' + gt_file + '.txt'), 'r') as f:
gt_raw = []
for item in f:
gt_raw.append(item.split())
if gt_file == 'sgd':
# determine ground truth communities
gt_comms = []
gt_nodes = []
for g in gt_raw:
gt_comms.append(str(g[1]))
gt_nodes.append(str(g[0]))
gt_comms = list(set(gt_comms))
gt_nodes = list(set(gt_nodes))
# append to "nodes" the nodes not in it but found in gt_nodes
ex_nodes_count = 0
for gn in gt_nodes:
if gn not in nodes:
nodes.append(gn)
ex_nodes_count += 1
# form ground truth theta
gt_theta = np.zeros((len(nodes), len(gt_comms)))
for g in gt_raw:
n_index = nodes.index(g[0])
c_index = gt_comms.index(g[1])
gt_theta[n_index, c_index] = 1.0
elif gt_file == 'mips_3_100':
# append to "nodes" the nodes not in it but found in gt_nodes
gt_nodes = set()
for g in gt_raw:
gt_nodes.update(g)
ex_nodes_count = 0
for gn in gt_nodes:
if gn not in nodes:
nodes.append(gn)
ex_nodes_count += 1
# form ground truth theta
gt_theta = np.zeros((len(nodes), len(gt_raw)))
for j in range(len(gt_raw)):
for cn in gt_raw[j]:
n_index = nodes.index(cn)
gt_theta[n_index, j] = 1.0
return gt_theta, ex_nodes_count, nodes
def calc_mmr(pred_c, gt_c, dataset, gt_dataset):
# predicted and ground truth comms count
pred_count = pred_c.shape[1]
gt_count = gt_c.shape[1]
# create bipartite graph
G = nx.Graph()
G.add_nodes_from(range(pred_count + gt_count))
for i in range(pred_count):
for j in range(gt_count):
predvec = pred_c[:, i]
gtvec = gt_c[:, j]
w = (np.dot(predvec, gtvec)/(np.dot(np.linalg.norm(predvec), np.linalg.norm(gtvec))))**2
G.add_edge(i, j + pred_count, weight = w)
print('created weighted bipartite graph using predicted and ground truth communities')
matching = list(nx.algorithms.max_weight_matching(G))
# save matching
with open('matching_' + dataset + '_' + gt_dataset + '.txt', 'w') as fp:
for row in matching:
fp.write(str(row) + '\n')
print('computed and saved max weight matching')
mmr = 0
for e in matching:
mmr += G.get_edge_data(e[0], e[1])['weight']
mmr /= gt_count
return mmr
def calc_frac(pred_c, gt_c):
# predicted and ground truth comms count
pred_count = pred_c.shape[1]
gt_count = gt_c.shape[1]
match_count = 0
for i1 in range(gt_count):
for j1 in range(pred_count):
gtvec = gt_c[:, i1]
predvec = pred_c[:, j1]
curr_match = (np.dot(predvec, gtvec) / (np.dot(np.linalg.norm(predvec), np.linalg.norm(gtvec)))) ** 2
if curr_match >= 0.25:
match_count += 1
break
return match_count/gt_count
def calc_acc(pred_c, gt_c):
# predicted and ground truth comms count
pred_count = pred_c.shape[1]
gt_count = gt_c.shape[1]
t = np.zeros((gt_count, pred_count))
for i1 in range(gt_count):
for j1 in range(pred_count):
t[i1, j1] = np.dot(gt_c[:, i1], pred_c[:, j1])
sn_num = 0.0
sn_den = 0.0
for q in range(gt_count):
sn_num += np.max(t[q, :])
sn_den += np.sum(gt_c[:, q])
sn = sn_num / sn_den
ppv_num = 0.0
ppv_den = 0.0
for j1 in range(pred_count):
ppv_num += np.max(t[:, j1])
ppv_den += np.sum(t[:, j1])
ppv = ppv_num / ppv_den
return math.sqrt(sn*ppv)
#-----------------------------------------------------------------------------------------------------
# parameter choices
# select dataset from: krogan2006_core, krogan2006_extended, collins2007
dataset = 'krogan2006_core'
# select validation (ground truth) dataset from: mips_3_100, sgd
gt_dataset = 'mips_3_100'
discard_small = False
cs_tol = 0.0 # threshold for comm size based on third largest entry in community vector
binary_memberships = True # whether to consider binary or fractional memberships
rounding_tol = 0.5 # quantity for rounding fractional data to binary
merge_comms = True
merge_tol = 0.8
#-----------------------------------------------------------------------------------------------------
# load nodes list
with open(('nodes_' + dataset + '.txt'), 'r') as f:
nodes = []
for item in f:
nodes.append(item.strip('\n'))
gt_comms, ex_nodes_count, nodes = read_gt(gt_dataset, nodes)
print('computed ground truth communities for ' + gt_dataset)
# load cvx opt solutions
optsols = np.load('opt_sols_' + dataset + '.npy')
n = optsols.shape[1]
k = optsols.shape[0]
optsols = np.transpose(np.squeeze(optsols, axis = 2))
print('finished loading cvx optimal solutions')
print('original # of communities: ' + str(k))
# convert opt sols to 0-1
if binary_memberships:
optsols = (optsols > rounding_tol).astype(int)
print('rounded fractional memberships to obtain binary memberships')
# remove communities based on their third largest values
if discard_small:
i = 0
rem = 0
while i < optsols.shape[1]:
c = optsols[:, i]
c = np.sort(c)
if c[-3] <= cs_tol:
optsols = np.delete(optsols, i, axis = 1)
rem += 1
else:
i += 1
print('# of communities after removing communities with <= 2 nodes: ' + str(k-rem))
# remove duplicate communities
optsols = np.unique(optsols, axis=1)
print('# of communities after removing duplicates: ' + str(optsols.shape[1]))
# merge highly overlapping communities
if merge_comms and binary_memberships:
num_merges = 1
while num_merges > 0:
num_merges = 0
for i in range(optsols.shape[1]):
j = i+1
while j in range(i+1, optsols.shape[1]):
ovr = (np.dot(optsols[:, i], optsols[:, j]))**2
ovr /= np.sum(optsols[:, i])
ovr /= np.sum(optsols[:, j])
if ovr >= merge_tol:
optsols[:, i] = np.maximum(optsols[:, i], optsols[:, j])
optsols = np.delete(optsols, j, axis=1)
num_merges += 1
else:
j += 1
# check no highly overlapping communities remain
ovr = np.zeros((optsols.shape[1], optsols.shape[1]))
for i in range(optsols.shape[1]):
for j in range(optsols.shape[1]):
if i != j:
curr_ovr = (np.dot(optsols[:, i], optsols[:, j]))**2
curr_ovr /= (np.linalg.norm(optsols[:, i]))**2
curr_ovr /= (np.linalg.norm(optsols[:, j]))**2
ovr[i, j] = curr_ovr
assert np.max(ovr) < merge_tol
print('# of communities after merging highly overlapping ones: ' + str(optsols.shape[1]))
if binary_memberships and discard_small:
assert np.min(np.sum(optsols, axis=0)) >= 3.0
# pad optsols for extra nodes found in ground truth data to obtain predicted communities
pred_comms = np.concatenate((optsols, np.zeros((ex_nodes_count, optsols.shape[1]))))
print('smallest predicted community size: ' + str(np.min(np.sum(pred_comms, axis=0))))
print('largest predicted community size: ' + str(np.max(np.sum(pred_comms, axis=0))))
# save bipartite graph data
# save nodes
with open('nodes_' + dataset + '_' + gt_dataset + '.txt', 'w') as fp:
for item in nodes:
fp.write("%s\n" % item)
np.save('pred_comms_' + dataset + '_' + gt_dataset + '.npy', pred_comms)
np.save('gt_comms_' + dataset + '_' + gt_dataset + '.npy', gt_comms)
print('finished saving bipartite graph data')
# obtain three scores - mmr, frac, acc
# obtain mmr
mmr = calc_mmr(pred_comms, gt_comms, dataset, gt_dataset)
print('mmr: ' + str(mmr))
# obtain frac
frac = calc_frac(pred_comms, gt_comms)
print('frac: ' + str(frac))
# obtain acc
acc = calc_acc(pred_comms, gt_comms)
print('geo accuracy: ' + str(acc))
comp_score = mmr + frac + acc
print('composite score: ' + str(comp_score))
# # save a sample community vector
# sample_com_vec = np.ndarray.tolist(pred_comms[:, 120])
# sample = []
# for i in range(len(nodes)):
# sample.append([nodes[i], sample_com_vec[i]])
# with xlsxwriter.Workbook('sample.xlsx') as wb:
# ws = wb.add_worksheet()
# for row_num, data in enumerate(sample):
# ws.write_row(row_num, 0, data)
``` |
{
"source": "JimitMehta1807/Inspiquote",
"score": 2
} |
#### File: JimitMehta1807/Inspiquote/main.py
```python
from Resources.quotes_fetch import Quotes
from bots.config import create_api
from post import posting
from bots.tags import check_mentions
from bots.direct_message import direct_message_initial
from bots.retweet_fav import retweet_fav_post
import time
import random
api = create_api()
quote_class = Quotes()
quote_author = random.choice([quote_class.quotes_fav(),quote_class.quotable()])
quote = "\"" + quote_author[0] + "\" " + "- " + quote_author[1]
def tags():
last_id = 1
last_id = check_mentions(api,last_id,quote)
def message():
direct_message_initial(api)
def tweet_quote():
api.update_status(quote)
def post():
posting(api)
def retweet_fun():
retweet_fav_post(api)
``` |
{
"source": "jimixjay/acestats",
"score": 2
} |
#### File: front/services/ingest_matches_service.py
```python
from service_objects import services
import numpy as np
import pandas as pd
from django.db import connection
import datetime
from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface
class IngestMatchesService(services.Service):
def process(self):
cursor = connection.cursor()
errors = ''
total_matches_updated = 0
total_matches_inserted = 0
tourneys = {}
surfaces = {}
tourney_levels = {}
players = {}
for year in range(1990, 2021):
csv_file = pd.read_csv('https://raw.githubusercontent.com/JeffSackmann/tennis_atp/master/atp_matches_' + str(year) + '.csv', header=1, names=self.getColumns())
for row in csv_file.itertuples():
created_at = datetime.datetime.now()
updated_at = datetime.datetime.now()
#try:
id = str(row.tourney_id) + '-' + str(row.match_num)
match = Match.objects.filter(id=id)
if (not match):
match = Match()
match.id = id
match.year = row.tourney_id.split('-')[0]
match.match_num = row.match_num
match.result = row.score
match.best_of = row.best_of
match.minutes = None if np.isnan(row.minutes) else row.minutes
match.round = row.round
if not tourneys.get(str(row.tourney_id)):
tourney = Tourney.objects.filter(id=row.tourney_id)
if (not tourney):
tourney = Tourney()
tourney.id = row.tourney_id
tourney.name = row.tourney_name
tourney.date = datetime.datetime.strptime(str(int(row.tourney_date)), '%Y%m%d').date()
tourney.created_at = created_at
tourney.updated_at = updated_at
if not surfaces.get(str(row.surface)):
surfaces[str(row.surface)] = self.getSurface(str(row.surface))
tourney.surface = surfaces[str(row.surface)]
if not tourney_levels.get(str(row.tourney_level)):
tourney_levels[str(row.tourney_level)] = self.getTourneyLevel(str(row.tourney_level))
tourney.tourney_level = tourney_levels[str(row.tourney_level)]
tourney.created_at = created_at
tourney.updated_at = updated_at
tourney.save()
else:
tourney = tourney[0]
tourneys[str(row.tourney_id)] = tourney
match.tourney = tourneys[str(row.tourney_id)]
match.created_at = created_at
match.updated_at = updated_at
match.save()
total_matches_inserted += 1
else:
match[0].year = row.tourney_id.split('-')[0]
match[0].save()
total_matches_updated += 1
match = match[0]
match_stats_id = str(row.tourney_id) + '-' + str(row.match_num) + '-' + str(row.winner_id)
match_stats = Match_Stats.objects.filter(id=match_stats_id)
if (not match_stats):
seed = row.winner_seed
if pd.isnull(row.winner_seed) or not str(row.winner_seed).isnumeric():
seed = None
match_stats = Match_Stats()
match_stats.id = match_stats_id
match_stats.type = ""
match_stats.seed = seed
match_stats.aces = None if np.isnan(row.w_ace) else row.w_ace
match_stats.double_faults = None if np.isnan(row.w_df) else row.w_df
match_stats.service_points = None if np.isnan(row.w_svpt) else row.w_svpt
match_stats.first_services = None if np.isnan(row.w_1stIn) else row.w_1stIn
match_stats.first_services_won = None if np.isnan(row.w_1stWon) else row.w_1stWon
match_stats.second_services_won = None if np.isnan(row.w_2ndWon) else row.w_2ndWon
match_stats.service_game_won = None if np.isnan(row.w_SvGms) else row.w_SvGms
match_stats.break_points_saved = None if np.isnan(row.w_bpSaved) else row.w_bpSaved
match_stats.break_points_played = None if np.isnan(row.w_bpFaced) else row.w_bpFaced
match_stats.rank = None if np.isnan(row.winner_rank) else row.winner_rank
match_stats.rank_points = None if np.isnan(row.winner_rank_points) else row.winner_rank_points
match_stats.is_winner = True
match_stats.created_at = created_at
match_stats.updated_at = updated_at
players[row.winner_id] = self.getPlayer(str(row.winner_id))
match_stats.player = players[row.winner_id]
match_stats.match = match
match_stats.save()
match_stats_id = str(row.tourney_id) + '-' + str(row.match_num) + '-' + str(row.loser_id)
match_stats = Match_Stats.objects.filter(id=match_stats_id)
if (not match_stats):
seed = row.loser_seed
if pd.isnull(row.loser_seed) or not str(row.loser_seed).isnumeric():
seed = None
match_stats = Match_Stats()
match_stats.id = match_stats_id
match_stats.type = ""
match_stats.seed = seed
match_stats.aces = None if np.isnan(row.l_ace) else row.l_ace
match_stats.double_faults = None if np.isnan(row.l_df) else row.l_df
match_stats.service_points = None if np.isnan(row.l_svpt) else row.l_svpt
match_stats.first_services = None if np.isnan(row.l_1stIn) else row.l_1stIn
match_stats.first_services_won = None if np.isnan(row.l_1stWon) else row.l_1stWon
match_stats.second_services_won = None if np.isnan(row.l_2ndWon) else row.l_2ndWon
match_stats.service_game_won = None if np.isnan(row.l_SvGms) else row.l_SvGms
match_stats.break_points_saved = None if np.isnan(row.l_bpSaved) else row.l_bpSaved
match_stats.break_points_played = None if np.isnan(row.l_bpFaced) else row.l_bpFaced
match_stats.rank = None if np.isnan(row.loser_rank) else row.loser_rank
match_stats.rank_points = None if np.isnan(row.loser_rank_points) else row.loser_rank_points
match_stats.is_winner = False
match_stats.created_at = created_at
match_stats.updated_at = updated_at
players[row.loser_id] = self.getPlayer(str(row.loser_id))
match_stats.player = players[row.loser_id]
match_stats.match = match
match_stats.save()
#except:
# assert False, (row.tourney_date, )
#errors = errors + '|||' + str(row.tourney_id) + '-' + str(row.match_num)
return {'inserts': total_matches_inserted, 'updates': total_matches_updated}
def getColumns(self):
return ["tourney_id","tourney_name","surface","draw_size","tourney_level","tourney_date","match_num","winner_id","winner_seed","winner_entry","winner_name","winner_hand","winner_ht","winner_ioc","winner_age",
"loser_id","loser_seed","loser_entry","loser_name","loser_hand","loser_ht","loser_ioc","loser_age","score","best_of","round","minutes","w_ace","w_df","w_svpt","w_1stIn","w_1stWon","w_2ndWon","w_SvGms","w_bpSaved",
"w_bpFaced","l_ace","l_df","l_svpt","l_1stIn","l_1stWon","l_2ndWon","l_SvGms","l_bpSaved","l_bpFaced","winner_rank","winner_rank_points","loser_rank","loser_rank_points"]
def getPlayer(self, id):
player = Player.objects.filter(id=id)
if (not player):
return None
else:
player = player[0]
return player
def getSurface(self, name):
surface = Surface.objects.filter(name=name)
if (not surface):
surface = Surface()
surface.name = name
surface.created_at = datetime.datetime.now()
surface.updated_at = datetime.datetime.now()
surface.save()
else:
surface = surface[0]
return surface
def getTourneyLevel(self, code):
tourney_level = Tourney_Level.objects.filter(code=code)
if (not tourney_level):
tourney_level = Tourney_Level()
tourney_level.code = code
tourney_level.name = code
tourney_level.created_at = datetime.datetime.now()
tourney_level.updated_at = datetime.datetime.now()
tourney_level.save()
else:
tourney_level = tourney_level[0]
return tourney_level
``` |
{
"source": "jimjag/crayon",
"score": 2
} |
#### File: crayon/Scripts/syncU3.py
```python
import os
import sys
import shutil
def main():
files = {}
web_dest_dir = os.path.join('..', 'Compiler', 'Platforms', 'JavaScriptApp', 'ResourcesU3')
from_dir = os.path.join('..', 'U3', 'src', 'render')
for file in [
'nori.js',
'nori_audio.js',
'nori_canvas.js',
'nori_context.js',
'nori_events.js',
'nori_gamepad.js',
'nori_gl.js',
'nori_layout.js',
'nori_util.js'
]:
from_file = os.path.join(from_dir, file)
files[from_file] = [
os.path.join(web_dest_dir, file.replace('.', '_') + '.txt')
]
msghub_client = os.path.join('..', 'Libraries', 'MessageHub', 'client')
files[os.path.join(msghub_client, 'js', 'messagehub.js')] = [
os.path.join(web_dest_dir, 'messagehub_js.txt')
]
msghub_nodejs_client = os.path.join(msghub_client, 'nodejs', 'messagehubclient')
    print(msghub_nodejs_client)
for file in os.listdir(msghub_nodejs_client):
files[os.path.join(msghub_nodejs_client, file)] = [os.path.join('..', 'U3', 'src', 'messagehubclient', file)]
for src_file in files.keys():
for dst_file in files[src_file]:
shutil.copy(src_file, dst_file)
print(src_file + ' --> ' + dst_file)
print("Done.")
main()
```
#### File: crayon/Scripts/u3packager.py
```python
import os
import sys
import shutil
def run_command(cmd):
c = os.popen(cmd)
t = c.read()
c.close()
return t
_current_dir = [os.getcwd()]
def push_cd(dir):
_current_dir.append(os.path.abspath(dir))
os.chdir(dir)
def pop_cd():
_current_dir.pop()
os.chdir(_current_dir[-1])
def main(args):
platform = 'windows'
if len(args) == 1:
platform = args[0]
isWindows = platform == 'windows'
isMac = platform == 'mac'
isLinux = platform == 'linux'
if isLinux:
print("Linux not supported yet")
return
if not isWindows and not isMac:
print("Unknown platform: '" + platform + "'")
return
print("Building U3 for " + platform)
if isWindows:
props = {
'arch': 'win32',
'electron-out': 'u3window-win32-x64',
'u3-out': 'win',
'bin-name': 'u3window.exe',
}
elif isMac:
props = {
'arch': 'darwin',
'electron-out': 'u3window-darwin-x64',
'u3-out': 'mac',
'bin-name': 'u3window',
}
else:
raise Exception()
push_cd(os.path.join('..', 'U3'))
if os.path.exists('dist'):
shutil.rmtree('dist')
os.mkdir('dist')
run_command('electron-packager src u3window --platform=' + props['arch'] + ' --arch=x64 --out=dist/temp --lang=en-US')
source = os.path.join('dist', 'temp', props['electron-out'])
target = os.path.join('dist', props['u3-out'])
shutil.move(source, target)
print('U3/dist/' + props['u3-out'] + '/' + props['bin-name'] + ' created')
shutil.rmtree(os.path.join('dist', 'temp'))
pop_cd()
main(sys.argv[1:])
``` |
{
"source": "jimjag/scancode-toolk",
"score": 2
} |
#### File: tests/scancode/test_cli.py
```python
import io
import json
import os
import pytest
from commoncode import fileutils
from commoncode.testcase import FileDrivenTesting
from commoncode.system import on_linux
from commoncode.system import on_mac
from commoncode.system import on_macos_14_or_higher
from commoncode.system import on_windows
from commoncode.system import py36
from commoncode.system import py37
from scancode.cli_test_utils import check_json_scan
from scancode.cli_test_utils import load_json_result
from scancode.cli_test_utils import load_json_result_from_string
from scancode.cli_test_utils import run_scan_click
from scancode.cli_test_utils import run_scan_plain
test_env = FileDrivenTesting()
test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
"""
Most of these tests spawn new processes as if launched from the command line. Some
of these CLI tests depend on the pytest monkeypatch fixture to ensure we are testing
the actual command outputs as if using a real command line call. Some use
a plain subprocess to the same effect.
"""
def test_package_option_detects_packages(monkeypatch):
test_dir = test_env.get_test_loc('package', copy=True)
result_file = test_env.get_temp_file('json')
args = ['--package', test_dir, '--json', result_file]
run_scan_click(args, monkeypatch=monkeypatch)
assert os.path.exists(result_file)
result = open(result_file).read()
assert 'package.json' in result
def test_verbose_option_with_packages(monkeypatch):
test_dir = test_env.get_test_loc('package', copy=True)
result_file = test_env.get_temp_file('json')
args = ['--package', '--verbose', test_dir, '--json', result_file]
result = run_scan_click(args, monkeypatch=monkeypatch)
assert 'package.json' in result.output
assert os.path.exists(result_file)
result = open(result_file).read()
assert 'package.json' in result
def test_copyright_option_detects_copyrights():
test_dir = test_env.get_test_loc('copyright', copy=True)
result_file = test_env.get_temp_file('json')
run_scan_click(['--copyright', test_dir, '--json', result_file])
assert os.path.exists(result_file)
assert len(open(result_file).read()) > 10
def test_verbose_option_with_copyrights(monkeypatch):
test_dir = test_env.get_test_loc('copyright', copy=True)
result_file = test_env.get_temp_file('json')
args = ['--copyright', '--verbose', test_dir, '--json', result_file]
result = run_scan_click(args, monkeypatch=monkeypatch)
assert os.path.exists(result_file)
assert 'copyright_acme_c-c.c' in result.output
assert len(open(result_file).read()) > 10
def test_scanned_resource_no_attribute_emails():
test_dir = test_env.get_test_loc('attribute_error_data/apache-1.1.txt')
result_file = test_env.get_temp_file('bb.json')
args = ['-clp', '--json-pp', result_file, test_dir, '--filter-clues']
result = run_scan_click(args)
assert "'ScannedResource' object has no attribute 'emails'" not in result.output
def test_unwanted_log_warning_message():
test_dir = test_env.get_test_loc('unwanted_log_message.txt')
result_file = test_env.get_temp_file('json')
args = ['-c', '--json-pp', result_file, test_dir]
result = run_scan_click(args)
assert 'No handlers could be found for logger "bs4.dammit"' not in result.output
def test_license_option_detects_licenses():
test_dir = test_env.get_test_loc('license', copy=True)
result_file = test_env.get_temp_file('json')
args = ['--license', test_dir, '--json', result_file, '--verbose']
run_scan_click(args)
assert os.path.exists(result_file)
assert len(open(result_file).read()) > 10
def test_can_call_run_scan_as_a_function():
from scancode.cli import run_scan
test_dir = test_env.get_test_loc('license', copy=True)
rc, results = run_scan(test_dir, license=True, copyright=True, return_results=True)
assert rc
assert len(results['files']) == 2
assert not results['headers'][0]['errors']
def test_run_scan_includes_outdated_in_extra():
from scancode.cli import run_scan
test_dir = test_env.get_test_loc('license', copy=True)
rc, results = run_scan(test_dir, outdated='out of date', return_results=True)
assert rc
assert results['headers'][0]['extra_data']['OUTDATED'] == 'out of date'
def test_usage_and_help_return_a_correct_script_name_on_all_platforms():
result = run_scan_click(['--help'])
assert 'Usage: scancode [OPTIONS]' in result.output
# this was showing up on Windows
assert 'scancode-script.py' not in result.output
result = run_scan_click([], expected_rc=2)
assert 'Usage: scancode [OPTIONS]' in result.output
# this was showing up on Windows
assert 'scancode-script.py' not in result.output
result = run_scan_click(['-xyz'], expected_rc=2)
# this was showing up on Windows
assert 'scancode-script.py' not in result.output
def test_scan_info_does_collect_info():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--info', '--strip-root', test_dir, '--json', result_file]
run_scan_click(args)
expected = test_env.get_test_loc('info/basic.expected.json')
check_json_scan(expected, result_file, regen=False)
def test_scan_info_does_collect_info_with_root():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
run_scan_click(['--info', test_dir, '--json', result_file])
expected = test_env.get_test_loc('info/basic.rooted.expected.json')
check_json_scan(expected, result_file, regen=False)
def test_scan_info_returns_full_root():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--info', '--full-root', test_dir, '--json', result_file]
run_scan_click(args)
result_data = json.loads(open(result_file).read())
file_paths = [f['path'] for f in result_data['files']]
assert len(file_paths) == 12
root = fileutils.as_posixpath(test_dir)
assert all(p.startswith(root) for p in file_paths)
def test_scan_info_returns_correct_full_root_with_single_file():
test_file = test_env.get_test_loc('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--info', '--full-root', test_file, '--json', result_file]
run_scan_click(args)
result_data = json.loads(open(result_file).read())
files = result_data['files']
# we have a single file
assert len(files) == 1
scanned_file = files[0]
# and we check that the path is the full path without repeating the file name
assert scanned_file['path'] == fileutils.as_posixpath(test_file)
def test_scan_info_returns_does_not_strip_root_with_single_file():
test_file = test_env.get_test_loc('single/iproute.c')
result_file = test_env.get_temp_file('json')
args = ['--info', '--strip-root', test_file, '--json', result_file]
run_scan_click(args)
expected = test_env.get_test_loc('single/iproute.expected.json')
check_json_scan(expected, result_file, remove_file_date=True, regen=False)
@pytest.mark.scanslow
def test_scan_info_license_copyrights():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--info', '--license', '--copyright', '--strip-root', test_dir, '--json', result_file]
run_scan_click(args)
check_json_scan(test_env.get_test_loc('info/all.expected.json'), result_file, regen=False)
def test_scan_noinfo_license_copyrights_with_root():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--email', '--url', '--license', '--copyright', test_dir, '--json', result_file]
run_scan_click(args)
expected = test_env.get_test_loc('info/all.rooted.expected.json')
check_json_scan(expected, result_file, regen=False)
def test_scan_email_url_info():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--email', '--url', '--info', '--strip-root', test_dir, '--json', result_file]
run_scan_click(args)
expected = test_env.get_test_loc('info/email_url_info.expected.json')
check_json_scan(expected, result_file, regen=False)
def test_scan_should_not_fail_on_faulty_pdf_or_pdfminer_bug_but_instead_keep_trucking_with_json():
test_file = test_env.get_test_loc('failing/patchelf.pdf')
result_file = test_env.get_temp_file('test.json')
args = ['--copyright', '--strip-root', test_file, '--json', result_file]
result = run_scan_click(args, expected_rc=0)
expected = test_env.get_test_loc('failing/patchelf.expected.json')
check_json_scan(expected, result_file, regen=False)
assert 'Some files failed to scan' not in result.output
assert 'patchelf.pdf' not in result.output
def test_scan_with_timeout_errors():
test_file = test_env.get_test_loc('failing/patchelf.pdf')
result_file = test_env.get_temp_file('test.json')
# we use a short timeout and a --test-slow-mode --email scan to simulate an error
args = ['-e', '--test-slow-mode', '--timeout', '0.01', '--verbose',
test_file, '--json', result_file]
result = run_scan_click(args, expected_rc=1)
assert 'ERROR: Processing interrupted: timeout' in result.output
assert 'patchelf.pdf' in result.output
result_json = json.loads(open(result_file).read())
assert result_json['files'][0]['scan_errors'][0].startswith('ERROR: for scanner: emails')
assert result_json['headers'][0]['errors']
def test_scan_with_errors_always_includes_full_traceback():
test_file = test_env.get_test_loc('failing/patchelf.pdf')
result_file = test_env.get_temp_file('test.json')
# we use a short timeout and a --test-error-mode --email scan to simulate an error
args = ['-e', '--test-error-mode', '--verbose',
test_file, '--json', result_file]
result = run_scan_click(args, expected_rc=1)
assert 'ScancodeError: Triggered email failure' in result.output
assert 'patchelf.pdf' in result.output
result_json = json.loads(open(result_file).read())
assert result_json['files'][0]['scan_errors'][0].startswith('ERROR: for scanner: emails')
assert result_json['headers'][0]['errors']
def test_failing_scan_return_proper_exit_code_on_failure():
test_file = test_env.get_test_loc('failing/patchelf.pdf')
result_file = test_env.get_temp_file('test.json')
args = ['-e', '--test-error-mode', test_file, '--json', result_file]
run_scan_click(args, expected_rc=1)
def test_scan_should_not_fail_on_faulty_pdf_or_pdfminer_bug_but_instead_report_errors_and_keep_trucking_with_html():
test_file = test_env.get_test_loc('failing/patchelf.pdf')
result_file = test_env.get_temp_file('test.html')
args = ['--copyright', '--timeout', '1', test_file, '--html', result_file]
run_scan_click(args)
def test_scan_license_should_not_fail_with_output_to_html_and_json():
test_dir = test_env.get_test_loc('dual_output_with_license', copy=True)
result_file_html = test_env.get_temp_file('html')
result_file_json = test_env.get_temp_file('json')
args = ['--license', test_dir,
'--json', result_file_json,
'--html', result_file_html,
'--verbose']
result = run_scan_click(args)
assert 'Object of type License is not JSON serializable' not in result.output
def test_scan_should_not_fail_on_faulty_pdf_or_pdfminer_bug_but_instead_report_errors_and_keep_trucking_with_html_app():
test_file = test_env.get_test_loc('failing/patchelf.pdf')
result_file = test_env.get_temp_file('test.app.html')
args = ['--copyright', '--timeout', '1', test_file, '--html-app', result_file]
run_scan_click(args)
def test_scan_works_with_multiple_processes():
test_dir = test_env.get_test_loc('multiprocessing', copy=True)
# run the same scan with one or three processes
result_file_1 = test_env.get_temp_file('json')
args = ['--copyright', '--processes', '1', test_dir, '--json', result_file_1]
run_scan_click(args)
result_file_3 = test_env.get_temp_file('json')
args = ['--copyright', '--processes', '3', test_dir, '--json', result_file_3]
run_scan_click(args)
res1 = json.loads(open(result_file_1).read())
res3 = json.loads(open(result_file_3).read())
assert sorted(res1['files'], key=lambda x: tuple(x.items())) == sorted(res3['files'], key=lambda x: tuple(x.items()))
def test_scan_works_with_no_processes_in_threaded_mode():
test_dir = test_env.get_test_loc('multiprocessing', copy=True)
# run the same scan with zero or one process
result_file_0 = test_env.get_temp_file('json')
args = ['--copyright', '--processes', '0', test_dir, '--json', result_file_0]
result0 = run_scan_click(args)
assert 'Disabling multi-processing' in result0.output
result_file_1 = test_env.get_temp_file('json')
args = ['--copyright', '--processes', '1', test_dir, '--json', result_file_1]
run_scan_click(args)
res0 = json.loads(open(result_file_0).read())
res1 = json.loads(open(result_file_1).read())
assert sorted(res0['files'], key=lambda x: tuple(x.items())) == sorted(res1['files'], key=lambda x: tuple(x.items()))
def test_scan_works_with_no_processes_non_threaded_mode():
test_dir = test_env.get_test_loc('multiprocessing', copy=True)
# run the same scan with zero or one process
result_file_0 = test_env.get_temp_file('json')
args = ['--copyright', '--processes', '-1', test_dir, '--json', result_file_0]
result0 = run_scan_click(args)
assert 'Disabling multi-processing and multi-threading' in result0.output
result_file_1 = test_env.get_temp_file('json')
args = ['--copyright', '--processes', '1', test_dir, '--json', result_file_1]
run_scan_click(args)
res0 = json.loads(open(result_file_0).read())
res1 = json.loads(open(result_file_1).read())
assert sorted(res0['files'], key=lambda x: tuple(x.items())) == sorted(res1['files'], key=lambda x: tuple(x.items()))
def test_scan_works_with_multiple_processes_and_timeouts():
test_dir = test_env.get_test_loc('timeout')
result_file = test_env.get_temp_file('json')
# we use a short timeout and a --test-slow-mode --email scan to simulate an error
args = ['--email', '--processes', '2',
'--timeout', '0.01',
# this will guarantee that an email scan takes at least one second
'--test-slow-mode',
'--strip-root', test_dir, '--json', result_file]
run_scan_click(args, expected_rc=1)
expected = [
[(u'path', u'test1.txt'),
(u'type', u'file'),
(u'emails', []),
(u'scan_errors', [u'ERROR: for scanner: emails:\nERROR: Processing interrupted: timeout after 0 seconds.'])],
[(u'path', u'test2.txt'),
(u'type', u'file'),
(u'emails', []),
(u'scan_errors', [u'ERROR: for scanner: emails:\nERROR: Processing interrupted: timeout after 0 seconds.'])],
[(u'path', u'test3.txt'),
(u'type', u'file'),
(u'emails', []),
(u'scan_errors', [u'ERROR: for scanner: emails:\nERROR: Processing interrupted: timeout after 0 seconds.'])]
]
result_json = json.loads(open(result_file).read())
assert sorted(sorted(x.items()) for x in result_json['files']) == sorted(sorted(x) for x in expected)
def check_scan_does_not_fail_when_scanning_unicode_files_and_paths(verbosity):
test_dir = test_env.get_test_loc(u'unicodepath/uc')
result_file = test_env.get_temp_file('json')
args = [
'--info',
'--license',
'--copyright',
'--package',
'--email',
'--url',
'--strip-root',
test_dir ,
'--json',
result_file
] + ([verbosity] if verbosity else [])
results = run_scan_click(args)
# the paths for each OS ends up encoded differently.
# See for details:
# https://github.com/nexB/scancode-toolkit/issues/390
# https://github.com/nexB/scancode-toolkit/issues/688
# https://github.com/nexB/scancode-toolkit/issues/1635
if on_macos_14_or_higher:
expected = 'unicodepath/unicodepath.expected-mac14.json' + verbosity
elif on_linux:
expected = 'unicodepath/unicodepath.expected-linux.json' + verbosity
elif on_mac:
expected = 'unicodepath/unicodepath.expected-mac.json' + verbosity
elif on_windows:
expected = 'unicodepath/unicodepath.expected-win.json' + verbosity
check_json_scan(test_env.get_test_loc(expected), result_file, remove_file_date=True, regen=False)
return results
def test_scan_does_not_fail_when_scanning_unicode_files_and_paths_default():
result = check_scan_does_not_fail_when_scanning_unicode_files_and_paths('')
assert result.output
def test_scan_does_not_fail_when_scanning_unicode_files_and_paths_verbose():
result = check_scan_does_not_fail_when_scanning_unicode_files_and_paths('--verbose')
assert result.output
def test_scan_does_not_fail_when_scanning_unicode_files_and_paths_quiet():
result = check_scan_does_not_fail_when_scanning_unicode_files_and_paths('--quiet')
assert not result.output
@pytest.mark.skipif(on_windows, reason='Python tar cannot extract these files on Windows')
def test_scan_does_not_fail_when_scanning_unicode_test_files_from_express():
# On Windows, Python tar cannot extract these files. Other
# extractors either fail or change the file name, making the test
# moot. Git cannot check these files. So for now it makes no sense
# to test this on Windows at all. Extractcode works fine, but does
# rename the problematic files.
test_path = u'unicode_fixtures.tar.gz'
test_dir = test_env.extract_test_tar_raw(test_path)
test_dir = os.fsencode(test_dir)
args = ['-n0', '--info', '--license', '--copyright', '--package', '--email',
'--url', '--strip-root', '--json', '-', test_dir]
run_scan_click(args)
def test_scan_can_handle_licenses_with_unicode_metadata():
test_dir = test_env.get_test_loc('license_with_unicode_meta')
result_file = test_env.get_temp_file('json')
run_scan_click(['--license', test_dir, '--json', result_file])
def test_scan_quiet_to_file_does_not_echo_anything():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--quiet', '--info', test_dir, '--json', result_file]
result = run_scan_click(args)
assert not result.output
def test_scan_quiet_to_stdout_only_echoes_json_results():
test_dir = test_env.extract_test_tar('info/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--quiet', '--info', test_dir, '--json-pp', result_file]
result_to_file = run_scan_click(args)
assert not result_to_file.output
# also test with an output of JSON to stdout
args = ['--quiet', '--info', test_dir, '--json-pp', '-']
result_to_stdout = run_scan_click(args)
# outputs to file or stdout should be identical
result1_output = open(result_file).read()
json_result1_output = load_json_result_from_string(result1_output)
json_result_to_stdout = load_json_result_from_string(result_to_stdout.output)
# cleanup JSON
assert json_result_to_stdout == json_result1_output
def test_scan_verbose_to_stdout_does_not_echo_ansi_escapes():
test_dir = test_env.extract_test_tar('info/basic.tgz')
args = ['--verbose', '--info', test_dir, '--json', '-']
result = run_scan_click(args)
assert '[?' not in result.output
def test_scan_can_return_matched_license_text():
test_file = test_env.get_test_loc('license_text/test.txt')
expected_file = test_env.get_test_loc('license_text/test.expected')
result_file = test_env.get_temp_file('json')
args = ['--license', '--license-text', '--strip-root', test_file, '--json', result_file]
run_scan_click(args)
check_json_scan(test_env.get_test_loc(expected_file), result_file, regen=False)
@pytest.mark.skipif(on_windows, reason='This test cannot run on windows as these are not legal file names.')
def test_scan_can_handle_weird_file_names():
test_dir = test_env.extract_test_tar('weird_file_name/weird_file_name.tar.gz')
result_file = test_env.get_temp_file('json')
args = ['-c', '-i', '--strip-root', test_dir, '--json', result_file]
result = run_scan_click(args)
assert "KeyError: 'sha1'" not in result.output
# Some info vary on each OS
# See https://github.com/nexB/scancode-toolkit/issues/438 for details
expected = 'weird_file_name/expected-posix.json'
check_json_scan(test_env.get_test_loc(expected), result_file, regen=False)
@pytest.mark.skipif(on_macos_14_or_higher or on_windows,
reason='Cannot handle yet byte paths on macOS 10.14+. '
'See https://github.com/nexB/scancode-toolkit/issues/1635')
def test_scan_can_handle_non_utf8_file_names_on_posix():
test_dir = test_env.extract_test_tar_raw('non_utf8/non_unicode.tgz')
result_file = test_env.get_temp_file('json')
args = ['-i', '--strip-root', test_dir, '--json', result_file]
run_scan_click(args)
# the paths for each OS end up encoded differently.
# See for details:
# https://github.com/nexB/scancode-toolkit/issues/390
# https://github.com/nexB/scancode-toolkit/issues/688
if on_linux:
expected = 'non_utf8/expected-linux.json'
elif on_mac:
expected = 'non_utf8/expected-mac.json'
elif on_windows:
expected = 'non_utf8/expected-win.json'
check_json_scan(test_env.get_test_loc(expected), result_file, regen=False)
def test_scan_can_run_from_other_directory():
test_file = test_env.get_test_loc('altpath/copyright.c')
expected_file = test_env.get_test_loc('altpath/copyright.expected.json')
result_file = test_env.get_temp_file('json')
work_dir = os.path.dirname(result_file)
args = ['-ci', '--strip-root', test_file, '--json', result_file]
run_scan_plain(args, cwd=work_dir)
check_json_scan(test_env.get_test_loc(expected_file), result_file, remove_file_date=True, regen=False)
def test_scan_logs_errors_messages_not_verbosely_on_stderr():
test_file = test_env.get_test_loc('errors/many_copyrights.c')
# we use a short timeout and a --test-slow-mode --email scan to simulate an error
args = ['-e', '--test-slow-mode', '-n', '0', '--timeout', '0.0001',
test_file, '--json', '-']
_rc, stdout, stderr = run_scan_plain(args, expected_rc=1)
assert 'Some files failed to scan properly:' in stderr
assert 'Path: many_copyrights.c' in stderr
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' in stdout, stdout
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' not in stderr, stderr
def test_scan_logs_errors_messages_not_verbosely_on_stderr_with_multiprocessing():
test_file = test_env.get_test_loc('errors/many_copyrights.c')
# we use a short timeout and a --test-slow-mode --email scan to simulate an error
args = ['-e', '--test-slow-mode', '-n', '2', '--timeout', '0.0001',
test_file, '--json', '-']
_rc, stdout, stderr = run_scan_plain(args, expected_rc=1)
assert 'Some files failed to scan properly:' in stderr
assert 'Path: many_copyrights.c' in stderr
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' in stdout
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' not in stderr
def test_scan_logs_errors_messages_verbosely():
test_file = test_env.get_test_loc('errors/many_copyrights.c')
# we use a short timeout and a --test-slow-mode --email scan to simulate an error
args = ['-e', '--test-slow-mode', '--verbose', '-n', '0', '--timeout', '0.0001',
test_file, '--json', '-']
_rc, stdout, stderr = run_scan_plain(args, expected_rc=1)
assert 'Some files failed to scan properly:' in stderr
assert 'Path: many_copyrights.c' in stderr
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' in stdout
assert 'ERROR: for scanner: emails:' in stdout
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' in stderr
assert 'ERROR: for scanner: emails:' in stderr
def test_scan_logs_errors_messages_verbosely_with_verbose_and_multiprocessing():
test_file = test_env.get_test_loc('errors/many_copyrights.c')
# we use a short timeout and a --test-slow-mode --email scan to simulate an error
args = ['-e', '--test-slow-mode', '--verbose', '-n', '2', '--timeout', '0.0001',
test_file, '--json', '-']
_rc, stdout, stderr = run_scan_plain(args, expected_rc=1)
assert 'Some files failed to scan properly:' in stderr
assert 'Path: many_copyrights.c' in stderr
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' in stdout
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' in stderr
@pytest.mark.scanslow
def test_scan_does_not_report_errors_on_incorrect_package_manifest():
test_file = test_env.get_test_loc('errors/broken_packages')
args = ['-pi', '--verbose', '-n', '0', test_file, '--json', '-']
_rc, stdout, stderr = run_scan_plain(args, expected_rc=0)
assert 'Some files failed to scan properly:' not in stderr
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' not in stdout
assert 'ERROR: Processing interrupted: timeout after 0 seconds.' not in stderr
def test_scan_progress_display_is_not_damaged_with_long_file_names_plain():
test_dir = test_env.get_test_loc('long_file_name')
result_file = test_env.get_temp_file('json')
args = ['--copyright', test_dir, '--json', result_file]
_rc, stdout, stderr = run_scan_plain(args)
expected1 = 'Scanned: abcdefghijklmnopqr...234567890123456789.c'
expected2 = 'Scanned: 0123456789012345678901234567890123456789.c'
expected3 = 'abcdefghijklmnopqrtu0123456789012345678901234567890123456789abcdefghijklmnopqrtu0123456789012345678901234567890123456789.c'
assert expected1 not in stdout
assert expected2 not in stdout
assert expected3 not in stdout
assert expected1 not in stderr
assert expected2 not in stderr
assert expected3 not in stderr
def test_scan_progress_display_is_not_damaged_with_long_file_names(monkeypatch):
test_dir = test_env.get_test_loc('long_file_name')
result_file = test_env.get_temp_file('json')
args = ['--copyright', test_dir, '--json', result_file]
result = run_scan_click(args, monkeypatch=monkeypatch)
if on_windows:
expected1 = 'Scanned: 0123456789012345678901234567890123456789.c'
expected2 = 'Scanned: abcdefghijklmnopqrt...0123456789012345678'
expected3 = 'abcdefghijklmnopqrtu0123456789012345678901234567890123456789abcdefghijklmnopqrtu0123456789012345678901234567890123456789.c'
try:
assert expected1 in result.output
assert expected2 not in result.output
assert expected3 not in result.output
except:
print()
print('output:')
print(result.output)
print()
raise
else:
expected1 = 'Scanned: abcdefghijklmnopqr...234567890123456789.c'
expected2 = 'Scanned: 0123456789012345678901234567890123456789.c'
expected3 = 'abcdefghijklmnopqrtu0123456789012345678901234567890123456789abcdefghijklmnopqrtu0123456789012345678901234567890123456789.c'
assert expected1 in result.output
assert expected2 in result.output
assert expected3 not in result.output
def test_scan_does_scan_php_composer():
test_file = test_env.get_test_loc('composer/composer.json')
expected_file = test_env.get_test_loc('composer/composer.expected.json')
result_file = test_env.get_temp_file('results.json')
run_scan_click(['--package', test_file, '--json', result_file])
check_json_scan(expected_file, result_file, regen=False)
def test_scan_does_scan_rpm():
test_file = test_env.get_test_loc('rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm')
expected_file = test_env.get_test_loc('rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm.expected.json')
result_file = test_env.get_temp_file('results.json')
run_scan_click(['--package', test_file, '--json', result_file])
check_json_scan(expected_file, result_file, regen=False)
def test_scan_cli_help(regen=False):
expected_file = test_env.get_test_loc('help/help.txt')
result = run_scan_click(['--help'])
result = result.output
if regen:
with io.open(expected_file, 'w', encoding='utf-8') as ef:
ef.write(result)
def no_spaces(s):
return ' '.join(s.split())
expected = open(expected_file).read()
if no_spaces(result) != no_spaces(expected):
assert result == expected
def test_scan_errors_out_with_unknown_option():
test_file = test_env.get_test_loc('license_text/test.txt')
args = ['--json--info', test_file]
result = run_scan_click(args, expected_rc=2)
assert 'Error: No such option: --json--info'.lower() in result.output.lower()
def test_scan_to_json_without_FILE_does_not_write_to_next_option():
test_file = test_env.get_test_loc('license_text/test.txt')
args = ['--json', '--info', test_file]
result = run_scan_click(args, expected_rc=2)
assert (
'Error: Invalid value for "--json": Illegal file name '
'conflicting with an option name: --info.'
).replace("'", '"') in result.output.replace("'", '"')
def test_scan_errors_out_with_conflicting_root_options():
test_file = test_env.get_test_loc('license_text/test.txt')
result_file = test_env.get_temp_file('results.json')
args = ['--strip-root', '--full-root', '--json', result_file, '--info', test_file]
result = run_scan_click(args, expected_rc=2)
assert ('Error: The option --strip-root cannot be used together with the '
'--full-root option(s) and --full-root is used.') in result.output
def test_scan_errors_out_with_conflicting_verbosity_options():
test_file = test_env.get_test_loc('license_text/test.txt')
result_file = test_env.get_temp_file('results.json')
args = ['--quiet', '--verbose', '--json', result_file, '--info', test_file]
result = run_scan_click(args, expected_rc=2)
assert ('Error: The option --quiet cannot be used together with the '
'--verbose option(s) and --verbose is used. You can set only one of '
'these options at a time.') in result.output
def test_scan_valid_duration_field_in_json_output_headers():
test_file = test_env.get_test_loc('license_text/test.txt')
result_file = test_env.get_temp_file('results.json')
args = ['--json', result_file, test_file]
run_scan_click(args)
with open(result_file) as result:
headers = json.loads(result.read())['headers']
assert headers[0]['duration'] >= 0
@pytest.mark.scanslow
@pytest.mark.skipif(on_windows, reason='Somehow this test fails for now on Python 3')
def test_scan_with_timing_json_return_timings_for_each_scanner():
test_dir = test_env.extract_test_tar('timing/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--email', '--url', '--license', '--copyright', '--info',
'--package', '--timing', '--json', result_file, test_dir]
run_scan_click(args)
file_results = load_json_result(result_file)['files']
# NB: these keys are the name of the scan plugins in setup.py
expected = set(['emails', 'urls', 'licenses', 'copyrights', 'info', 'packages'])
check_timings(expected, file_results)
@pytest.mark.scanslow
@pytest.mark.skipif(on_windows, reason='Somehow this test fails for now on Python 3')
def test_scan_with_timing_jsonpp_return_timings_for_each_scanner():
test_dir = test_env.extract_test_tar('timing/basic.tgz')
result_file = test_env.get_temp_file('json')
args = ['--email', '--url', '--license', '--copyright', '--info',
'--package', '--timing', '--verbose', '--json-pp', result_file, test_dir]
run_scan_click(args)
file_results = load_json_result(result_file)['files']
# NB: these keys are the name of the scan plugins in setup.py
expected = set(['emails', 'urls', 'licenses', 'copyrights', 'info', 'packages'])
check_timings(expected, file_results)
def check_timings(expected, file_results):
for res in file_results:
scan_timings = res['scan_timings']
if not res['type'] == 'file':
# should be an empty dict for dirs
assert not scan_timings
continue
assert scan_timings
for scanner, timing in scan_timings.items():
assert scanner in expected
assert timing
@pytest.mark.scanslow
def test_summary_counts_when_using_disk_cache():
test_file = test_env.get_test_loc('summaries/counts')
result_file = test_env.get_temp_file('json')
args = ['--info', '-n', '-1', '--max-in-memory', '-1', test_file, '--json', result_file]
result = run_scan_click(args, expected_rc=0)
assert ('44 resource(s): 33 file(s) and 11 directorie(s)') in result.output
def test_scan_should_not_fail_with_low_max_in_memory_setting_when_ignoring_files():
test_file = test_env.get_test_loc('summaries/client')
result_file = test_env.get_temp_file('json')
args = ['--info', '-n', '-1', '--ignore', '*.gif', '--max-in-memory=1', test_file, '--json', result_file]
run_scan_click(args, expected_rc=0)
def test_get_displayable_summary():
from scancode.cli import get_displayable_summary
from commoncode.resource import Codebase
# Set up test codebase
test_codebase = test_env.get_test_loc('summaries/client')
codebase = Codebase(test_codebase, strip_root=True)
codebase.timings['scan'] = 0
scan_names = 'foo, bar, baz'
processes = 23
errors = ['failed to scan ABCD']
results = get_displayable_summary(codebase, scan_names, processes, errors)
expected = (
[u'Some files failed to scan properly:', u'failed to scan ABCD'],
[
u'Summary: foo, bar, baz with 23 process(es)',
u'Errors count: 1',
u'Scan Speed: 0.00 files/sec. ',
u'Initial counts: 0 resource(s): 0 file(s) and 0 directorie(s) ',
u'Final counts: 0 resource(s): 0 file(s) and 0 directorie(s) ',
u'Timings:',
u' scan_start: None',
u' scan_end: None']
)
assert results == expected
def test_display_summary_edge_case_scan_time_zero_should_not_fail():
from io import StringIO
import sys
from scancode.cli import display_summary
from commoncode.resource import Codebase
# Set up test codebase
test_codebase = test_env.get_test_loc('summaries/client')
codebase = Codebase(test_codebase, strip_root=True)
codebase.timings['scan'] = 0
scan_names = 'foo, bar, baz'
processes = 23
errors = ['failed to scan ABCD']
try:
# Redirect summary output from `stderr` to `result`
result = StringIO()
sys.stderr = result
# Output from `display_summary` will be in `result`
display_summary(codebase, scan_names, processes, errors)
finally:
# Set `stderr` back
sys.stderr = sys.__stderr__
def test_check_error_count():
test_dir = test_env.get_test_loc('failing')
result_file = test_env.get_temp_file('json')
# we use a short timeout and a --test-slow-mode --email scan to simulate an error
args = ['-e', '--test-slow-mode', '--timeout', '0.1',
test_dir, '--json', result_file]
result = run_scan_click(args, expected_rc=1)
output = result.output
    output = output.replace('\n', ' ').replace('  ', ' ')
output = output.split(' ')
error_files = output.count('Path:')
error_count = output[output.index('count:') + 1]
assert str(error_files) == str(error_count)
on_mac_new_py = on_mac and not (py36 or py37)
def test_scan_keep_temp_files_is_false_by_default():
test_file = test_env.get_test_loc('tempfiles/samples')
result_file = test_env.get_temp_file('json')
# mock using a well defined temporary directory
temp_directory = test_env.get_temp_dir()
env = dict(os.environ)
env.update({'SCANCODE_TEMP': temp_directory})
args = [
'--info', test_file, '--json', result_file,
# this forces using a temp file cache so that we have temp files
'--max-in-memory', '-1']
_ = run_scan_plain(args, expected_rc=0, env=env)
# the SCANCODE_TEMP dir is not deleted, but it should be empty
assert os.path.exists(temp_directory)
# this does not make sense but that's what is seen in practice
expected = 2 if (on_windows or on_mac_new_py) else 1
assert len(list(os.walk(temp_directory))) == expected
def test_scan_keep_temp_files_keeps_files():
test_file = test_env.get_test_loc('tempfiles/samples')
result_file = test_env.get_temp_file('json')
# mock using a well defined temporary directory
temp_directory = test_env.get_temp_dir()
env = dict(os.environ)
env.update({'SCANCODE_TEMP': temp_directory})
args = [
'--keep-temp-files',
'--info', test_file, '--json', result_file,
# this forces using a temp file cache
'--max-in-memory', '-1']
_rc, _stdout, _stderr = run_scan_plain(args, expected_rc=0, env=env)
# the SCANCODE_TEMP dir is not deleted, but it should not be empty
assert os.path.exists(temp_directory)
# this does not make sense but that's what is seen in practice
expected = 8 if (on_windows or on_mac_new_py) else 7
assert len(list(os.walk(temp_directory))) == expected
def test_scan_errors_out_without_an_input_path():
args = ['--json-pp', '-']
result = run_scan_click(args, expected_rc=2)
assert 'Error: Invalid value: At least one input path is required.' in result.output
def test_merge_multiple_scans():
test_file_1 = test_env.get_test_loc('merge_scans/sample.json')
test_file_2 = test_env.get_test_loc('merge_scans/thirdparty.json')
result_file = test_env.get_temp_file('json')
args = ['--from-json', test_file_1, '--from-json', test_file_2, '--json', result_file]
run_scan_click(args, expected_rc=0)
expected = test_env.get_test_loc('merge_scans/expected.json')
with open(expected) as f:
expected_files = json.loads(f.read())['files']
with open(result_file) as f:
result_files = json.loads(f.read())['files']
assert result_files == expected_files
def test_VirtualCodebase_output_with_from_json_is_same_as_original():
test_file = test_env.get_test_loc('virtual_idempotent/codebase.json')
result_file = test_env.get_temp_file('json')
args = ['--from-json', test_file, '--json-pp', result_file]
run_scan_click(args)
expected = load_json_result(test_file, remove_file_date=True)
results = load_json_result(result_file, remove_file_date=True)
expected.pop('summary', None)
results.pop('summary', None)
expected_headers = expected.pop('headers', [])
results_headers = results.pop('headers', [])
assert json.dumps(results , indent=2) == json.dumps(expected, indent=2)
assert len(results_headers) == len(expected_headers) + 1
``` |
{
"source": "JimJam07/DevBot",
"score": 3
} |
#### File: devbot/github_login/login.py
```python
import click
from github import Github
import requests
import pickle
from ..decorators.auth_state import check_auth_state
from ..decorators.user_filepath import get_user_filepath
@click.command()
@click.option('--username', prompt="Your Github Username")
@click.option('--password', prompt=True, hide_input=True)
@get_user_filepath
@check_auth_state
def login(username, password, is_authenticated, user, user_file_path):
if is_authenticated:
print(f"\033[93mYou've already logged in as {user.get_user().login}. Run `devbot logout` to Log out\033[0m")
return
res = requests.get('https://api.github.com', auth=(username, password))
if res.status_code < 400:
user = Github(username, password)
print(f"\033[92mLogged in Successfully\033[0m")
pickle.dump(user, open(user_file_path, "wb"))
else:
print(f"\033[91mCould not log in\033[0m")
```
#### File: devbot/github_logout/logout.py
```python
import click
import os
from ..decorators.auth_state import check_auth_state
from ..decorators.user_filepath import get_user_filepath
@click.command()
@get_user_filepath
@check_auth_state
def logout(is_authenticated, user, user_file_path):
if is_authenticated:
os.remove(user_file_path)
print(f"\033[92mLogged out Successfully\033[0m")
else:
print(f"\033[91mYou're not logged in. run `devbot login` to login\033[0m")
``` |
{
"source": "jimjh/arc_cache",
"score": 4
} |
#### File: arc_cache/arc_cache/decorator.py
```python
from functools import wraps, partial, _make_key
from collections import namedtuple, OrderedDict as od
_CacheInfo = namedtuple('CacheInfo', ['hits', 'misses', 'max_size',
't1_size', 't2_size', 'split'])
def _shift(src, dst):
"""Pops the first item in ``src`` and moves it to ``dst``."""
key, value = src.popitem(last=False)
dst.append(key)
def _pop(src):
"""Pops the first item in ``src``."""
src.pop(0)
def _delta(x, y):
"""Computes |y|/|x|."""
return max(float(len(y))/float(len(x)), 1.0)
def _adapt_plus(b1, b2, max_size, p):
return min(p + _delta(b1, b2), float(max_size))
def _adapt_minus(b2, b1, p):
return max(p - _delta(b2, b1), 0.0)
def arc_cache(max_size=128, typed=False):
"""Decorator to memoize the given callable using an adaptive replacement cache.
:param max_size: maximum number of elements in the cache
:type max_size: int
:param typed: cache arguments of different types separately
:type typed: bool
``max_size`` must be a positive integer.
If ``typed`` is ``True``, arguments of different types will be cached
separately. For example, ``f(3.0)`` and ``f(3)`` will be treated as
distinct calls with distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple
``(hits, misses, max_size, t1_size, t2_size, split)`` with ``f.cache_info()``.
Reset the cache using ``f.cache_clear()``.
TODO worry about thread-safety
"""
if not isinstance(max_size, int):
raise TypeError('max_size must be of type int.')
if max_size is None or 0 >= max_size:
raise ValueError('max_size must be a positive integer. If you want an'
' unbounded cache, use functools.lru_cache.')
def decorating_function(func):
# one instance of each var per decorated function
# in LRU to MRU order
t1, b1, t2, b2 = od(), [], od(), []
p = max_size / 2
hits = misses = 0
# == invariants ==
# l1: elements were used only once
# l2: elements were used at least twice
# len(l1) ≤ c
# len(l2) ≤ 2c
# len(l1) + len(l2) ≤ 2c
# t1, t2, b1, and b2 are always pairwise disjoint
# if len(l1) + len(l2) < c, then both b1 and b2 are empty
# conversely, if b1/b2 is not empty, then len(l1) + len(l2) ≥ c
# if len(l1) + len(l2) ≥ c, then len(t1) + len(t2) = c
evict_t1 = partial(_shift, t1, b1)
evict_t2 = partial(_shift, t2, b2)
evict_b1 = partial(_pop, b1)
evict_b2 = partial(_pop, b2)
adapt_plus = partial(_adapt_plus, b1, b2, max_size)
adapt_minus = partial(_adapt_minus, b2, b1)
def evict_t1_t2():
if t1 and len(t1) > p:
evict_t1()
else:
evict_t2()
def evict_l1(): # DBL: evict from l1, slide everything back
if b1: # i.e. |t1| < max_size
evict_b1()
evict_t1_t2()
else:
t1.popitem(last=False)
def evict_l2():
total = len(t1) + len(b1) + len(t2) + len(b2)
if total >= max_size:
if total == 2 * max_size:
# DBL: if lists are full, evict from l2
evict_b2()
# ARC: if cache is full, evict from t1/t2
evict_t1_t2()
# else: cache is not full, don't evict from t1/t2
@wraps(func)
def wrapper(*args, **kwargs):
nonlocal p, hits, misses
key = _make_key(args, kwargs, typed)
# ARC hit: Case I
if key in t1:
hits += 1
result = t1[key]
del t1[key]
t2[key] = result # MRU in t2
return result
elif key in t2:
hits += 1
t2.move_to_end(key) # MRU in t2
return t2[key]
# ARC miss
misses += 1
result = func(*args, **kwargs)
if key in b1: # Case II: hit in l1
p = adapt_plus(p)
# by invariant, the cache must be full, so evict from t1 or t2
evict_t1_t2()
t2[key] = result
elif key in b2: # Case III: hit in l2
# by invariant, the cache must be full, so evict from t1 or t2
p = adapt_minus(p)
evict_t1_t2()
t2[key] = result
else: # Case IV: cache miss in DBL(2c)
len_l1 = len(t1) + len(b1)
if len_l1 == max_size:
evict_l1()
elif len_l1 < max_size:
evict_l2()
# if cache is not full, add it to t1 even if we exceed p
t1[key] = result # MRU in t1
return result
def cache_info():
return _CacheInfo(hits, misses, max_size, len(t1), len(t2), p)
def cache_clear():
nonlocal hits, misses, p
nonlocal t1, b1, t2, b2
t1, b1, t2, b2 = od(), [], od(), []
p = max_size / 2
hits = misses = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
```
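A minimal usage sketch of the decorator API described in the docstring above (the toy `square` function and the traced counts are illustrative only; they follow from the Case I/IV branches of `wrapper` as shown):
```python
# Illustrative only: a small function cached with arc_cache, assuming the
# decorator is importable from arc_cache.decorator as defined above.
from arc_cache.decorator import arc_cache

@arc_cache(max_size=4)
def square(x):
    return x * x

for n in (1, 2, 3, 1, 2, 5):
    square(n)

# 1, 2, 3, 5 are misses; the repeated 1 and 2 are hits promoted from t1 to t2.
info = square.cache_info()
assert (info.hits, info.misses) == (2, 4)
assert (info.t1_size, info.t2_size) == (2, 2)

square.cache_clear()
# after a clear, counters reset and the split returns to max_size / 2
assert square.cache_info() == (0, 0, 4, 0, 0, 2.0)
```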
#### File: arc_cache/tests/test_import.py
```python
import arc_cache
def test_import():
"""from arc_cache import arc_cache works"""
assert 'arc_cache' in vars(arc_cache)
assert callable(arc_cache.arc_cache)
``` |
{
"source": "jimjh/challenges",
"score": 3
} |
#### File: snakes_and_ladders/python/main.py
```python
import sys
from collections import deque
class Board:
def __init__(self, leaps_dict, N=100):
self.N = N
self.leaps = leaps_dict
def next_moves(self, idx):
roll_moves = []
for i in range(idx + 1, idx + 7):
if i in self.leaps:
roll_moves.append(self.leaps[i])
elif i <= self.N:
roll_moves.append(i)
return roll_moves
def solve(self):
Q = set([0])
num_rolls = 0
while True:
NQ = set()
for curr_square in Q:
if curr_square == self.N:
return num_rolls
NQ |= set(self.next_moves(curr_square))
Q = NQ
num_rolls += 1
# grab input
stdin = sys.stdin
T = int(next(stdin).strip())  # number of test cases
for case in range(T):
    next(stdin)  # ignore num_ladders, num_snakes
    leap_lines = next(stdin).strip() + ' ' + next(stdin).strip()
    leaps_dict = dict([map(int, l.split(',')) for l in leap_lines.split(' ')])
    B = Board(leaps_dict)
    print(B.solve())
``` |
{
"source": "jimjimliu/LOL_Match_Prediction",
"score": 3
} |
#### File: jimjimliu/LOL_Match_Prediction/Live_Game_Prediction.py
```python
from keras.models import load_model
import tensorflow as tf
from Live_Game import Live_Game
import numpy as np
from UTIL import utils
from sklearn.preprocessing import StandardScaler
import time
class GamePredict():
def __init__(self, player_name):
self.player_name = player_name
def predict(self):
'''
:return:
'''
"load the model from file"
# load neural network
NN_clf = tf.keras.models.load_model("MODELS/FNN.h5")
# load LR
LR_clf = utils.load_pkl('LR', "MODELS")
# load baseline
BL_clf = tf.keras.models.load_model("MODELS/FNN_baseline.h5")
# load naive bayes
GNB_clf = utils.load_pkl('NB', 'MODELS')
while True:
try:
"get active game data"
game = Live_Game(self.player_name)
game_data, game_lineup = game.live_game()
# print(game_data)
# print(len(game_data))
# ss = StandardScaler()
# start_data = ss.fit_transform(start_data)
# game_data = ss.fit_transform(game_data)
NN_prob = NN_clf.predict(game_data)
LR_prob = LR_clf.predict_proba(game_data)
# BL_prob = BL_clf.predict(game_lineup)
# GNB_prob = GNB_clf.predict_proba(game_data)
print("Neural network: ", NN_prob)
print("Logistic regression: ", LR_prob)
# print("baseline model: ", BL_prob)
# print("Naive bayes: ", GNB_prob)
final_pred = np.divide(np.add(NN_prob, LR_prob), 2)
# print(final_pred)
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
red_win_rate = round(final_pred[0][0], 4)
blue_win_rate = round(final_pred[0][1], 4)
print("Blue team win rate: {}%".format(blue_win_rate*100))
print("Red team win rate: {}%".format(red_win_rate*100))
# predict every 1 minutes
time.sleep(60)
except Exception as e:
print(e)
print("No Active Game Found for User {}.".format(self.player_name))
break
return
if __name__ == '__main__':
player_name = "<NAME>"
GamePredict(player_name).predict()
``` |
{
"source": "jimjimliu/Riot-Watcher",
"score": 2
} |
#### File: valorant/urls/MatchApiUrls.py
```python
from .ValEndpoint import ValEndpoint
class MatchEndpoint(ValEndpoint):
def __init__(self, url: str, **kwargs):
super().__init__(f"/match/v1{url}", **kwargs)
class MatchApiUrls:
by_id = MatchEndpoint("/matches/{match_id}")
matchlist_by_puuid = MatchEndpoint("/matchlists/by-puuid/{puuid}")
``` |
{
"source": "jimjing/MAndM",
"score": 3
} |
#### File: MAndM/code/globalConfig.py
```python
import logging
import ConfigParser
import sys, os
import time
def get_mAndm_root():
# Climb the tree to find out where we are
p = os.path.abspath(__file__)
t = ""
while t != "code":
(p, t) = os.path.split(p)
if p == "" or p == "/":
print "I have no idea where I am; this is ridiculous"
return None
return os.path.join(p, "code")
def setupLogging(loggerLevel=None):
# Set up loggers for printing error messages
class ColorLogFormatter(logging.Formatter):
def __init__(self, *args, **kwds):
super(ColorLogFormatter, self).__init__(*args, **kwds)
self.plain_formatter = logging.Formatter(" [ %(module)s ] %(message)s")
self.debug_formatter = logging.Formatter(" --> [%(levelname)s] (%(processName)s) (%(filename)s, line %(lineno)s): %(message)s")
self.detailed_formatter = logging.Formatter(" --> [%(levelname)s] (%(pathname)s, line %(lineno)s): %(message)s")
def colorize(self, level, string):
if sys.platform in ['win32', 'cygwin']:
# Message with color is not yet supported in Windows
return string
# elif not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty():
# # Only try to colorize if outputting to a terminal
# return string
else:
colors = {'ERROR': 91, 'WARNING': 93, 'INFO': 97, 'DEBUG': 94, 'Level 1': 100, 'Level 2': 105, 'Level 4': 104, 'Level 6': 102, 'Level 8': 101}
return "\033[{0}m{1}\033[0m".format(colors[level], string)
def format(self, record):
if record.levelname == "INFO":
precolor = self.plain_formatter.format(record)
elif record.levelname == "DEBUG":
precolor = self.debug_formatter.format(record)
else:
precolor = self.detailed_formatter.format(record)
return self.colorize(record.levelname, precolor)
mAndm_logger = logging.getLogger('mAndm_logger')
# make sure rospy does not overwrite our logger
#logging.root = logging.getLogger('mAndm_logger')
h = logging.StreamHandler()
f = ColorLogFormatter()
h.setFormatter(f)
if not mAndm_logger.handlers:
mAndm_logger.addHandler(h)
cfg = ConfigParser.ConfigParser()
try:
cfg.read(os.path.join(os.path.dirname(os.path.realpath(__file__)),"global.cfg"))
loggerLevel = cfg.get("logging", "level").lower()
except:
logging.warning("Could not parse global.cfg file; using defaults")
loggerLevel = "info"
if loggerLevel == 'error':
mAndm_logger.setLevel(logging.ERROR)
elif loggerLevel == 'warning':
mAndm_logger.setLevel(logging.WARNING)
elif loggerLevel == 'info':
mAndm_logger.setLevel(logging.INFO)
elif loggerLevel == 'debug':
mAndm_logger.setLevel(logging.DEBUG)
elif loggerLevel == 'notset':
#mAndm_logger.setLevel(logging.NOTSET)
# for some reason logging.NOTSET does not work
mAndm_logger.setLevel(int(1))
else:
mAndm_logger.setLevel(int(loggerLevel))
# Choose the timer func with maximum accuracy for given platform
if sys.platform in ['win32', 'cygwin']:
best_timer = time.clock
else:
best_timer = time.time
# Set-up logging automatically on import
setupLogging()
```
#### File: src/highlevel_planner/file_interface.py
```python
import os
import xml.etree.ElementTree as ET
import yaml
import rospy
from highlevel_planner.configuration_object import ConfigurationObject
class FileInterface:
def __init__(self):
pass
def _loadFile(self, path):
if not os.path.isfile (path):
raise IOError('Cannot find file: {}'.format(path))
with open(path,'r') as f:
output = f.read()
return output
def loadConfigurationFile(self, path):
configuration_object = ConfigurationObject()
data = self._loadFile(path)
root = ET.fromstring(data)
for module_node in root.iter('ModuleState'):
configuration_object.addModuleFromXml(module_node)
for connection_node in root.iter('Connection'):
configuration_object.addConnectionFromXml(connection_node)
return configuration_object
def loadAutFile(self, path):
data = self._loadFile(path)
def loadMappingFile(self, path):
data = self._loadFile(path)
return yaml.load(data)
if __name__ == "__main__":
CFL = FileInterface()
```
#### File: aprilposehandler/scripts/poseFilter.py
```python
import rospy
import tf
import numpy
from functools import partial
from std_msgs.msg import String
import std_msgs.msg
from geometry_msgs.msg import PoseArray, PointStamped, Pose
from apriltags_ros.msg import AprilTagDetectionArray, AprilTagDetection
class PoseFilter:
def __init__(self):
rospy.init_node('pose_filter', anonymous=True)
self.sub_topics = ["/camera/tag_detections","/cameraUP/tag_detections"]
self.subs = {}
self.tf = None
self.camera_data = {}
self.tag_list = []
def initialize(self):
self.tf = tf.TransformListener()
for name in self.sub_topics:
cb = partial(self.callback, topic=name)
self.subs[name] = rospy.Subscriber(name, AprilTagDetectionArray, cb)
self.camera_data[name] = {}
def talker(self):
pub = rospy.Publisher('tag_detections_merged', AprilTagDetectionArray, queue_size=10)
rate = rospy.Rate(1) # 10hz
while not rospy.is_shutdown():
detection_array = AprilTagDetectionArray()
for tag in self.tag_list:
detections_from_cameras = []
for topic in self.camera_data.keys():
if (tag in self.camera_data[topic].keys()):
if (self.camera_data[topic][tag] != None):
detections_from_cameras.append(self.camera_data[topic][tag])
self.camera_data[topic][tag] = None
if (len(detections_from_cameras)>0):
merged_detection = AprilTagDetection()
merged_detection.id = tag
merged_detection.size = detections_from_cameras[0].size
merged_detection.pose.header = detections_from_cameras[0].pose.header
pose_list = [d.pose.pose for d in detections_from_cameras]
merged_detection.pose.pose = self.averagePose (pose_list)
detection_array.detections.append(merged_detection)
pub.publish(detection_array)
rate.sleep()
def averagePose (self, pose_list):
p = Pose()
p.position.x = numpy.mean([pose.position.x for pose in pose_list])
p.position.y = numpy.mean([pose.position.y for pose in pose_list])
p.position.z = numpy.mean([pose.position.z for pose in pose_list])
p.orientation = pose_list[0].orientation
return p
def callback(self, data, topic):
for detection in data.detections:
if (detection.id not in self.tag_list):
self.tag_list.append(detection.id)
self.camera_data[topic][detection.id] = detection
#point_in_world = self.tf.transformPoint("/world", ps)
def listener(self):
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
try:
pf = PoseFilter()
pf.initialize()
pf.talker()
except rospy.ROSInterruptException:
pass
```
#### File: code/test_motion_planner_files/test_action.py
```python
import rospy
import actionlib
import time
from mandm_motion_planner.msg import PlanAndExecuteTrajAction, PlanAndExecuteTrajFeedback, PlanAndExecuteTrajResult
class PlanAndExecuteTrajServer:
def __init__(self):
self.server = actionlib.SimpleActionServer('plan_and_execute_traj', PlanAndExecuteTrajAction, self.execute, False)
print "we are starting...!"
self.feedback = PlanAndExecuteTrajFeedback()
self.result = PlanAndExecuteTrajResult()
print "feedback:" + str(self.feedback)
print type(self.feedback)
print self.feedback.rough_plan_found
print self.feedback.executing_rough_plan
print self.feedback.executing_grasp
print "result:" + str(self.result)
print type(self.result)
print self.result.rough_execution_result
print self.result.rough_planning_result
print self.result.grasp_result
self.server.start()
def execute(self, goal):
# Do lots of awesome groundbreaking robot stuff here
print "we are executing...!"
print goal.tag_name
print goal.target_pose
print goal.action_type
self.result.grasp_result = True
self.server.publish_feedback(self.feedback)
print dir(self.server)
time.sleep(5)
self.feedback.rough_plan_found = True
self.server.publish_feedback(self.feedback)
self.server.set_succeeded(result=self.result)
if __name__ == '__main__':
rospy.init_node('plan_and_execute_traj_server')
server = PlanAndExecuteTrajServer()
rospy.spin()
```
#### File: code/test_motion_planner_files/trajectory_main.py
```python
import rospy
from baxter_pykdl import baxter_kinematics
import time
import baxter_interface
from baxter_interface import CHECK_VERSION
import os, sys
# Climb the tree to find out where we are
p = os.path.abspath(__file__)
t = ""
while t != "code":
(p, t) = os.path.split(p)
if p == "":
print "I have no idea where I am; this is ridiculous"
sys.exit(1)
sys.path.append(os.path.join(p,"code"))
import globalConfig
import logging
mAndm_logger = logging.getLogger('mAndm_logger')
import trajHelper
import numpy
import time
"""
This python file gives an overview of how to do planning
"""
# inputs: current joint location, goal position of gripper
# outputs return trajectory
def set_j(limb, joint_name, desired_pos):
current_position = limb.joint_angle(joint_name)
joint_command = {joint_name: desired_pos}
limb.set_joint_positions(joint_command)
def inverse_kinematics_wrapper(position, orientation=[], seed=[]):
"""
takes in seed in radians. convert to degrees and feed into inverse_kinematics in pyKDL.
    then convert output to radians and return.
"""
seed_degrees = []
if seed:
#seed = seed_degrees # in radians now
for x in seed:
seed_degrees.append(x*180/numpy.pi) # convert from radians to degree
mAndm_logger.log(2,"seed_degrees:" + str(seed_degrees))
if orientation and seed_degrees:
output_angles_degrees = kin.inverse_kinematics(position, orientation=orientation, seed=seed_degrees)
elif orientation:
output_angles_degrees = kin.inverse_kinematics(position, orientation=orientation)
elif seed_degrees:
output_angles_degrees = kin.inverse_kinematics(position, seed=seed_degrees)
else:
output_angles_degrees = kin.inverse_kinematics(position)
mAndm_logger.log(2,"output_angles_degrees:" + str(output_angles_degrees))
output_angles_radians = []
if output_angles_degrees is not None:
for x in output_angles_degrees:
output_angles_radians.append(x/180*numpy.pi)
else:
output_angles_radians = None
mAndm_logger.log(2,"output_angles_radians:" + str(output_angles_radians))
return output_angles_radians
limb = 'right'
# enable robot
rospy.init_node('baxter_kinematics')
rs = baxter_interface.RobotEnable(CHECK_VERSION)
mAndm_logger.info("Enabling robot... ")
rs.enable()
waypointEx = trajHelper.WayPointExecution(limb)
#waypointEx._limb.move_to_neutral()
#do IK
kin = baxter_kinematics(limb)
# find current position
mAndm_logger.debug('kin.forward_position_kinematics():' + str(kin.forward_position_kinematics()))
current_gripper_pos = kin.forward_position_kinematics()
current_gripper_pos = [0.582583, -0.180819, 0.216003] #[x, y, z]
rot = [0.03085, 0.9945, 0.0561, 0.0829]
# later on only test it here. inverse to forward
mAndm_logger.debug('current_gripper_pos:' + str(current_gripper_pos))
mAndm_logger.debug('current_gripper_pos[:3]:' + str(current_gripper_pos[:3]))
mAndm_logger.debug('rot:' + str(rot))
current_joint_angles = [waypointEx._limb.joint_angle(jnt) for jnt in waypointEx._limb.joint_names()]
mAndm_logger.debug('current_joint_angles:' + str(current_joint_angles))
output_angles_radians = kin.inverse_kinematics(current_gripper_pos[:3], orientation=rot, seed=current_joint_angles)
print output_angles_radians
#next_joint_angles = inverse_kinematics_wrapper(current_gripper_pos[:3], seed=current_joint_angles)
#mAndm_logger.debug('next_joint_angles:' + str(next_joint_angles))
output_angles_radians = ( output_angles_radians + numpy.pi) % (2 * numpy.pi ) - numpy.pi
#output_angles_radians = numpy.unwrap(output_angles_radians, axis=0) # wrap around 0 to 2 pi
print output_angles_radians
# now make sure the angles are within range
# radians
jointRanges = {'S0':[-1.7016 ,+1.7016],\
'S1':[-2.147 ,+1.047],\
'E0':[-3.0541 , +3.0541],\
'E1':[-0.05 ,+2.618 ],\
'W0':[-3.059 ,+3.059],\
'W1':[-1.5707 ,+2.094],\
'W2':[-3.059 ,+3.059]}
# some angles are out of range.. don't know what to do..
# i guess try their IK example provided. not with pyKDL
next_joint_angles_dict = dict(zip(waypointEx._limb.joint_names(), output_angles_radians))
mAndm_logger.debug('next_joint_angles_dict:' + str(next_joint_angles_dict))
current_gripper_pos = kin.forward_position_kinematics(next_joint_angles_dict)
mAndm_logger.debug('from inverse_gripper_pos:' + str(current_gripper_pos))
# traj accuracy problem
waypointEx._limb.set_joint_positions(next_joint_angles_dict) #need to check proximity yourselves
time.sleep(10.0)
current_joint_angles = waypointEx._limb.joint_angles()
mAndm_logger.debug('current_joint_angles:' + str(current_joint_angles))
current_gripper_pos = kin.forward_position_kinematics(current_joint_angles)
mAndm_logger.debug('from inverse_gripper_pos:' + str(current_gripper_pos))
# constraints wasn't too right, as in it is over (KDL returns things that cannot be executed.)
# # !!! replace with random pose?
# pos = [0.582583, -0.180819, 0.216003] #[x, y, z]
# #pos = [0.582583, -0.90819, 0.216003] #[x, y, z]
# rot = [0.03085, 0.9945, 0.0561, 0.0829] # [i, j, k, w]
# stepPose = []
# noOfSteps = 10
# inverseTrialsThres = 10
# # now separate pos into steps
# for idx, coordPose in enumerate(pos):
# stepPose.append(numpy.linspace(current_gripper_pos[idx], coordPose, noOfSteps))
# mAndm_logger.debug('stepPose:' + str(stepPose))
# # now append waypoints
# #mAndm_logger.debug("waypointEx._limb.joint_names():" + str(waypointEx._limb.joint_names()))
# #mAndm_logger.debug("waypointEx._limb.joint_angles():" + str(waypointEx._limb.joint_angles()))
# current_joint_angles = [waypointEx._limb.joint_angle(jnt) for jnt in waypointEx._limb.joint_names()]
# step_joint_angles = current_joint_angles
# for x in range(noOfSteps):
# # can also add seed
# currentStep = [subArray[x] for subArray in stepPose]
# mAndm_logger.log(4, 'currentStep:' + str(currentStep))
# mAndm_logger.log(6, 'step_joint_angles:' + str(step_joint_angles))
# #mAndm_logger.debug(kin.inverse_kinematics(pos, orientation=rot, seed=cur_cmd)) # return joint angles
# next_joint_angles = inverse_kinematics_wrapper(currentStep, seed=step_joint_angles)
# mAndm_logger.log(6, "next_joint_angles:" + str(next_joint_angles)) # return joint angles
# # inverseTrailsCount = 0
# # while inverseTrailsCount < inverseTrialsThres:
# # next_joint_angles = inverse_kinematics_wrapper(currentStep, seed=step_joint_angles)
# # #next_joint_angles = inverse_kinematics_wrapper(currentStep)
# # mAndm_logger.log(6, "next_joint_angles:" + str(next_joint_angles)) # return joint angles
# # # try a couple times.
# # if next_joint_angles is not None:
# # inverseTrailsCount += inverseTrialsThres
# # else:
# # inverseTrailsCount += 1
# # add new joints if it exists
# if next_joint_angles is not None:
# waypointEx.addWaypoint(next_joint_angles)
# step_joint_angles = next_joint_angles
# waypointEx.playWaypoints(timeout=20.0)
# mAndm_logger.debug('kin.forward_position_kinematics():' + str(kin.forward_position_kinematics()))
# rospy.spin()
# # radians
# jointRanges = {'S0':[-1.7016 ,+1.7016],\
# 'S1':[-2.147 ,+1.047],\
# 'E0':[-3.0541 , +3.0541],\
# 'E1':[-0.05 ,+2.618 ],\
# 'W0':[-3.059 ,+3.059],\
# 'W1':[-1.5707 ,+2.094],\
# 'W2':[-3.059 ,+3.059]}
# # what have done
# # try different methods to move arm (trajectory executor with server/client)
# # ended up with just move_to_joint positions (blocking)
# # problems
# # not accurate end pos: accuracy problem (set_positions - non-blocking)
# # radians/degrees not sure (testing with forward inverse kinamatices that requires
# # modification of the library)
# # moving on
# # check object/arm collisions
# # RRT
# # also find out how far each arm can reach
```
#### File: verified_torque_controller/scripts/verified_torque_control_motion.py
```python
import rospy
import actionlib
import os.path
import matlab.engine
from dynamic_reconfigure.server import (
Server,
)
from std_msgs.msg import (
Empty,
)
import baxter_interface
from baxter_examples.cfg import (
JointSpringsExampleConfig,
)
from baxter_interface import CHECK_VERSION
from baxter_pykdl import baxter_kinematics
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from verified_torque_controller.msg import verified_motionAction, verified_motionFeedback, verified_motionResult
class verifiedMotionServer:
def __init__(self, reconfig_server):
self.server = actionlib.SimpleActionServer('execute_verified_motion', verified_motionAction, self.execute, False)
self.feedback = None
self.result = None
self._dyn = reconfig_server
# control parameters
self._rate = 1000.0 # Hz
self._missed_cmds = 20.0 # Missed cycles before triggering timeout
# Include gripper(s)
#self._left_gripper = baxter_interface.gripper.Gripper("left")
# Limbs,limb parameters
self.limb = {}
self.limb["left_arm"] = baxter_interface.Limb('left')
self.limb["right_arm"] = baxter_interface.Limb('right')
self._left_arm = baxter_interface.Limb('left')
self._kin = {}
self._kin["left_arm"] = baxter_kinematics('left')
self._kin["right_arm"] = baxter_kinematics('right')
self._springs = dict()
self._damping = dict()
self._start_angles = dict()
self._goal_angles = dict()
self._spring_modifier = []
self._damping_modifier = []
self._sim_time = []
# Initialize Matlab
# Start matlab
print("\n Starting MATLAB...")
self.eng = matlab.engine.start_matlab() # start matlab
print("Matlab started.")
print("Initializing robotic toolbox files ...")
self.eng.baxter_matlab_robotics_toolbox_setup(nargout = 0)
print("Initialized.")
# Is this necessary?
# verify robot is enabled
print("Getting robot state... ")
self._rs = baxter_interface.RobotEnable(CHECK_VERSION)
self._init_state = self._rs.state().enabled
print("Enabling robot... ")
self._rs.enable()
#self.move_to_neutral('left_arm')
#self.move_to_neutral('right_arm')
print('Starting server ...')
self.server.start()
def execute(self, goal):
        print('======= motion request received from client ...')
self.feedback = verified_motionFeedback()
self.result = verified_motionResult()
#self.move_to_neutral(goal.arm)
self._spring_modifier = goal.motion_parameters[0]
self._damping_modifier = goal.motion_parameters[1]
self._sim_time = goal.motion_parameters[2]
#print(self._spring_modifier)
#print(self._damping_modifier)
# Generate goal angles (inverse kinematics)
self._start_angles = self.limb[goal.arm].joint_angles()
found_solution = self.generate_target_angles(goal.arm,goal.EFF_movement)
if not found_solution:
self.result.error_message = "No inverse kinematics solution found."
self.server.set_aborted(result=self.result)
return
# Verify the motion
# Compute flowstar file - verify the controller
print("Computing Flow* file, verifying the motion ...")
#self._update_parameters(goal.arm)
#timeEnd = matlab.double([self._sim_time])
#springMod = matlab.double([self._spring_modifier])
#dampingMod = matlab.double([self._damping_modifier])
#maxMinValues = self.eng.generate_flowstar_file(self._start_angles,self._goal_angles,self._springs,self._damping,timeEnd,springMod, dampingMod)
#print(maxMinValues)
print("... verified.")
# set control rate
control_rate = rospy.Rate(self._rate)
# for safety purposes, set the control rate command timeout.
# if the specified number of command cycles are missed, the robot
# will timeout and disable
self.limb[goal.arm].set_command_timeout((1.0 / self._rate) * self._missed_cmds)
#print('you made it this far?')
ctrlTime = self._sim_time # in seconds
ctrlTrigger = True
#start = rospy.Time.now()
startTime = rospy.get_time()
print("Implementing torque control ...")
dataSavePath = '/home/scotto/Documents/MATLAB/flowstar_plots'
fileName = 'python_EFF_motion_data'
timeVec = []
effX = []
effY = []
effZ = []
while ctrlTrigger:
#nowTime = rospy.Time.now()
nowTime = rospy.get_time()
#timeDiff = nowTime.secs - startTime.secs
timeDiff = nowTime - startTime
# Record time/position data
timeVec.append(timeDiff)
currentEFFposition = self.limb[goal.arm].endpoint_pose()
effX.append(currentEFFposition['position'].x)
effY.append(currentEFFposition['position'].y)
effZ.append(currentEFFposition['position'].z)
# End recording stuffs
if timeDiff >= ctrlTime:
ctrlTrigger = False
self._update_forces(goal.arm)
control_rate.sleep()
print("Exiting torque control ...")
self.limb[goal.arm].exit_control_mode()
print("Torqe control complete.")
self.result.motion_complete = True
self.server.set_succeeded(result=self.result)
# Record position and time data
# Concatenate data
concatenatedData = []
for i in range(0, len(timeVec)):
concatenatedData.append(str(timeVec[i]) + ' ' + str(effX[i]) + ' ' + str(effY[i]) + ' ' + str(effZ[i]))
completeName = os.path.join(dataSavePath,fileName + '.txt')
file1 = open(completeName, "w+")
for item in concatenatedData:
file1.write("%s\n" % item)
file1.close()
def generate_target_angles(self,arm,targetXYZ):
print("Generating target joint angles ...")
# Matlab - inverse kinematics
# Find EFF Position
Eff_Start_baxter_7 = self.limb[arm].endpoint_pose() # EFF Position here is different than that used by the DH parameters
#print('Baxter start EFF')
#print(Eff_Start_baxter_7)
#EFF_left = self._left_arm.endpoint_pose()
#print('Alternate EFF:')
#print(EFF_left)
#print('Arm start angles:')
#print(self.limb[arm].joint_angles())
move_x = targetXYZ[0]
move_y = targetXYZ[1]
move_z = targetXYZ[2]
pykdl_end_point = [Eff_Start_baxter_7['position'].x + move_x, Eff_Start_baxter_7['position'].y + move_y, Eff_Start_baxter_7['position'].z + move_z]
pykdl_end_orientation = [Eff_Start_baxter_7['orientation'].x, Eff_Start_baxter_7['orientation'].y, Eff_Start_baxter_7['orientation'].z, Eff_Start_baxter_7['orientation'].w]
#print('pykdl end point')
#print(pykdl_end_point)
#print('pykdl end orientation')
#print(pykdl_end_orientation)
#print("KDL forward kinematics test:")
#print(self._kin[arm].forward_position_kinematics())
pykdl_end_angles = self._kin[arm].inverse_kinematics(pykdl_end_point,pykdl_end_orientation)
#print('PYKDL angles:')
#print(pykdl_end_angles)
if len(pykdl_end_angles) == 0:
# Error - no solution found
ikin_solution_found = False
return ikin_solution_found
else:
ikin_solution_found = True
angles = self.limb[arm].joint_angles()
if arm == 'left_arm':
angles['left_s0'] = pykdl_end_angles[0]
angles['left_s1'] = pykdl_end_angles[1]
angles['left_e0'] = pykdl_end_angles[2]
angles['left_e1'] = pykdl_end_angles[3]
angles['left_w0'] = pykdl_end_angles[4]
angles['left_w1'] = pykdl_end_angles[5]
angles['left_w2'] = pykdl_end_angles[6]
elif arm == 'right_arm':
angles['right_s0'] = pykdl_end_angles[0]
angles['right_s1'] = pykdl_end_angles[1]
angles['right_e0'] = pykdl_end_angles[2]
angles['right_e1'] = pykdl_end_angles[3]
angles['right_w0'] = pykdl_end_angles[4]
angles['right_w1'] = pykdl_end_angles[5]
angles['right_w2'] = pykdl_end_angles[6]
self._goal_angles = angles
print("Goal angles determined.")
#print('Final angle positions:')
#print(angles)
return ikin_solution_found
def _update_forces(self,arm):
"""
Calculates the current angular difference between the start position
and the current joint positions applying the joint torque spring forces
as defined on the dynamic reconfigure server.
"""
# get latest spring constants
self._update_parameters(arm)
# disable cuff interaction
#self._pub_cuff_disable.publish()
# create our command dict
cmd = dict()
# record current angles/velocities
cur_pos = self.limb[arm].joint_angles()
cur_vel = self.limb[arm].joint_velocities()
# calculate current forces
for joint in self._start_angles.keys():
# spring portion
#cmd[joint] = self._springs[joint] * (self._start_angles[joint] - cur_pos[joint])
cmd[joint] = self._springs[joint] * self._spring_modifier * (self._goal_angles[joint] - cur_pos[joint])
# damping portion
cmd[joint] -= self._damping[joint] * self._damping_modifier * cur_vel[joint]
# command new joint torques
self.limb[arm].set_joint_torques(cmd)
def _update_parameters(self,arm):
for joint in self.limb[arm].joint_names():
self._springs[joint] = self._dyn.config[joint[-2:] + '_spring_stiffness']
self._damping[joint] = self._dyn.config[joint[-2:] + '_damping_coefficient']
def move_to_neutral(self,arm):
"""
Moves the limb to neutral location.
"""
print("Moving to the neutral arm position")
self.limb[arm].move_to_neutral()
def clean_shutdown(self):
"""
Switches out of joint torque mode to exit cleanly
"""
print("\nExiting process...")
self.limb["left_arm"].exit_control_mode()
self.limb["right_arm"].exit_control_mode()
#if not self._init_state and self._rs.state().enabled:
# print("Disabling robot...")
# self._rs.disable()
def main():
print("Initializing node... ")
rospy.init_node("verified_arm_motion")
dynamic_cfg_srv = Server(JointSpringsExampleConfig, lambda config, level: config)
server = verifiedMotionServer(dynamic_cfg_srv)
rospy.on_shutdown(server.clean_shutdown)
rospy.spin()
if __name__ == "__main__":
main()
``` |
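A minimal action-client sketch for exercising the server above. This is hypothetical: it assumes a running ROS master, a Baxter robot, and that the `verified_motion` action definition generates a `verified_motionGoal` message containing the `arm`, `EFF_movement` and `motion_parameters` fields the server reads.
```python
import rospy
import actionlib
from verified_torque_controller.msg import verified_motionAction, verified_motionGoal

rospy.init_node('verified_motion_client')
client = actionlib.SimpleActionClient('execute_verified_motion', verified_motionAction)
client.wait_for_server()

goal = verified_motionGoal()
goal.arm = 'left_arm'                     # matches the keys used for self.limb in the server
goal.EFF_movement = [0.05, 0.0, 0.0]      # desired end-effector offset (assumed metres)
goal.motion_parameters = [1.0, 1.0, 5.0]  # spring modifier, damping modifier, sim time in seconds
client.send_goal(goal)
client.wait_for_result()
print(client.get_result())
```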
{
"source": "jimjkelly/celery-simple-elasticsearch",
"score": 2
} |
#### File: celery-simple-elasticsearch/celery_simple_elasticsearch/tasks.py
```python
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.db.models.loading import get_model
from django.utils.importlib import import_module
from celery.utils.log import get_task_logger
from .conf import settings
if settings.CELERY_SIMPLE_ELASTICSEARCH_TRANSACTION_SAFE \
and not getattr(settings, 'CELERY_ALWAYS_EAGER', False):
from djcelery_transactions import PostTransactionTask as Task
else:
from celery.task import Task # noqa
logger = get_task_logger(__name__)
class CelerySimpleElasticSearchSignalHandler(Task):
using = settings.CELERY_SIMPLE_ELASTICSEARCH_DEFAULT_ALIAS
max_retries = settings.CELERY_SIMPLE_ELASTICSEARCH_MAX_RETRIES
default_retry_delay = settings.CELERY_SIMPLE_ELASTICSEARCH_RETRY_DELAY
def split_identifier(self, identifier, **kwargs):
"""
Break down the identifier representing the instance.
Converts 'notes.note.23' into ('notes.note', 23).
"""
bits = identifier.split('.')
if len(bits) < 2:
logger.error("Unable to parse object "
"identifer '%s'. Moving on..." % identifier)
return (None, None)
pk = bits[-1]
# In case Django ever handles full paths...
object_path = '.'.join(bits[:-1])
return (object_path, pk)
def get_method(self, method_identifier):
"""
Given a method identifier, return the method
"""
class_string, method_string = method_identifier.rsplit('.', 1)
try:
class_obj = self.get_model_class(class_string)
except ImproperlyConfigured:
# We assume that just means this isn't a Django model - try
# loading it as a module:
class_obj = import_module(class_string)
method = getattr(class_obj, method_string, None)
if not method:
msg = 'Could not get method from "{}"'.format(method_identifier)
logger.error(msg)
raise ValueError(msg)
return method
def get_model_class(self, object_path, **kwargs):
"""
        Fetch the model's class in a standardized way.
"""
bits = object_path.split('.')
app_name = '.'.join(bits[:-1])
classname = bits[-1]
model_class = get_model(app_name, classname)
if model_class is None:
raise ImproperlyConfigured("Could not load model '%s'." %
object_path)
return model_class
def get_instance(self, model_class, pk, **kwargs):
"""
        Fetch the instance in a standardized way.
"""
instance = None
try:
instance = model_class._default_manager.get(pk=pk)
except model_class.DoesNotExist:
logger.error("Couldn't load %s.%s.%s. Somehow it went missing?" %
(model_class._meta.app_label.lower(),
model_class._meta.object_name.lower(), pk))
except model_class.MultipleObjectsReturned:
logger.error("More than one object with pk %s. Oops?" % pk)
return instance
def run(self, action, identifier, instantiator=None, **kwargs):
"""
Trigger the actual index handler depending on the
given action.
"""
# First get the object path and pk (e.g. ('notes.note', 23))
object_path, pk = self.split_identifier(identifier, **kwargs)
if object_path is None or pk is None:
msg = "Couldn't handle object with identifier %s" % identifier
logger.error(msg)
raise ValueError(msg)
# Then get the model class for the object path
model_class = self.get_model_class(object_path, **kwargs)
if instantiator:
instantiator_method = self.get_method(instantiator)
else:
instantiator_method = self.get_instance
instance = instantiator_method(model_class, pk)
if hasattr(model_class, 'get_index_name'):
current_index_name = model_class.get_index_name()
else:
current_index_name = settings.ELASTICSEARCH_INDEX
action_method = self.get_method(action)
try:
action_method(instance)
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
else:
logger.debug('Successfully executed {} on {} for index {}'.format(
action,
identifier,
current_index_name,
))
class CelerySimpleElasticSearchUpdateIndex(Task):
"""
A celery task class to be used to call the update_index management
command from Celery.
"""
def run(self, apps=None, **kwargs):
defaults = {
'batchsize': settings.CELERY_SIMPLE_ELASTICSEARCH_COMMAND_BATCH_SIZE,
'age': settings.CELERY_SIMPLE_ELASTICSEARCH_COMMAND_AGE,
'remove': settings.CELERY_SIMPLE_ELASTICSEARCH_COMMAND_REMOVE,
'using': [settings.CELERY_SIMPLE_ELASTICSEARCH_DEFAULT_ALIAS],
'workers': settings.CELERY_SIMPLE_ELASTICSEARCH_COMMAND_WORKERS,
'verbosity': settings.CELERY_SIMPLE_ELASTICSEARCH_COMMAND_VERBOSITY,
}
defaults.update(kwargs)
if apps is None:
apps = settings.CELERY_SIMPLE_ELASTICSEARCH_COMMAND_APPS
# Run the update_index management command
logger.info("Starting update index")
call_command('--rebuild', *apps, **defaults)
logger.info("Finishing update index")
```
#### File: celery-simple-elasticsearch/celery_simple_elasticsearch/utils.py
```python
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.db import connection, transaction
from .conf import settings
def get_update_task(task_path=None):
default_task = settings.CELERY_SIMPLE_ELASTICSEARCH_DEFAULT_TASK
import_path = task_path or default_task
module, attr = import_path.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
Task = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, attr))
return Task()
def enqueue_task(action, instance, instantiator=None):
"""
Common utility for enqueing a task for the given action and
model instance. Optionally provide an instantiator that handles
instance instantiation.
"""
def submit_task():
if transaction.get_connection().in_atomic_block:
with transaction.atomic():
task.delay(action, identifier, instantiator)
else:
task.delay(action, identifier, instantiator)
action = get_method_identifier(action)
identifier = get_object_identifier(instance)
if instantiator:
instantiator = get_method_identifier(instantiator)
kwargs = {}
if settings.CELERY_SIMPLE_ELASTICSEARCH_QUEUE:
kwargs['queue'] = settings.CELERY_SIMPLE_ELASTICSEARCH_QUEUE
if settings.CELERY_SIMPLE_ELASTICSEARCH_COUNTDOWN:
kwargs['countdown'] = settings.CELERY_SIMPLE_ELASTICSEARCH_COUNTDOWN
task = get_update_task()
if hasattr(connection, 'on_commit'):
connection.on_commit(
lambda: submit_task()
)
else:
submit_task()
def get_object_identifier(obj):
"""
This function will provide a dot notated reference to the
item to identify.
"""
return u'{}.{}.{}'.format(
obj._meta.app_label,
obj.__class__.__name__,
obj.id
)
def get_method_identifier(identify):
"""
This function provides a dot notated reference to a bound
function
"""
return u'{}.{}.{}'.format(
identify.im_self._meta.app_label,
identify.im_self.__name__,
identify.im_func.__name__
)
``` |
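A hedged sketch of how `enqueue_task` might be wired to a Django `post_save` signal. The app, the `Note` model and its `index_note` classmethod are illustrative names only; `get_method_identifier` expects `action` to be a bound classmethod of a Django model.
```python
from django.db.models.signals import post_save
from django.dispatch import receiver

from celery_simple_elasticsearch.utils import enqueue_task
from notes.models import Note  # illustrative app/model


@receiver(post_save, sender=Note)
def enqueue_note_index(sender, instance, **kwargs):
    # Note.index_note is assumed to be a classmethod that indexes a single instance.
    enqueue_task(Note.index_note, instance)
```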
{
"source": "jimj/sopelmodules",
"score": 2
} |
#### File: jimj/sopelmodules/mongosupport.py
```python
from pymongo import MongoClient
import sopel.module
def setup(bot):
mongoclient = MongoClient()
bot.mongodb = mongoclient.ircbot
@sopel.module.commands('findone')
@sopel.module.require_admin
def findone(bot, trigger):
collection = bot.mongodb[trigger.group(2)]
doc = collection.find_one()
bot.reply('found %s' % doc)
```
#### File: jimj/sopelmodules/quote.py
```python
import random
import re
from sopel.module import commands, example
#Matches: <@+nickNames123>
_NICKNAME = '(<[^\s]+>)'
@commands('quote')
def quote(bot, trigger):
quotes = bot.mongodb.quotes
input = trigger.group(2) if trigger.group(2) else ''
quotable = re.findall(_NICKNAME, input)
if quotable:
who, quote = parse_quotable_params(input)
store_quote(quotes, who, quote)
bot.reply('quote stored.')
else:
nick, search = parse_search_params(input)
bot.say(get_random_quote(quotes, nick, search))
def parse_search_params(params):
param_re = '(?P<nick>\w+)?\s?(?P<search>\/.*\/)?$'
params = re.match(param_re, params)
if params:
search = params.group('search')
if search:
search = search.strip().replace('/','')
return (params.group('nick'), search)
else:
return (None, None)
def parse_quotable_params(params):
param_re = '(?P<realname>\w+)?\s*[\d:]{0,5}\s*(?P<quote><.*)'
params = re.match(param_re, params)
quoted = [params.group('realname')] if params.group('realname') else []
quote = params.group('quote')
nicks = re.findall(_NICKNAME, quote)
if nicks:
nicks = [re.subn(r'[@<>+]', '', n)[0] for n in nicks]
quoted = [nick.lower() for nick in set(quoted + nicks)]
return (quoted, quote)
def get_random_quote(quotes, nick, search):
query = {}
if nick:
query['nick'] = nick.lower()
if search:
query['quote'] = re.compile(search, re.IGNORECASE)
ids = quotes.find(query, [])
num_quotes = ids.count()
if num_quotes == 0:
return "No quotes found."
quote = random.randint(0, num_quotes - 1)
quote = quotes.find({'_id': ids[quote]['_id']}, ['quote'])
return quote[0]['quote']
def store_quote(quotes, quoted, quote):
quote_doc = {
'nick': quoted,
'network': 'slashnet',
'quote': quote
}
quotes.insert(quote_doc)
``` |
{
"source": "jimjudd/imitationbot",
"score": 2
} |
#### File: jimjudd/imitationbot/log_messages.py
```python
import requests
import logging
import sys
from rate_check import rate_check
import redis
from urlparse import urlparse
import os
import time
import csv
from pymongo import MongoClient
import datetime
from pprint import pprint
import json
#Setup Logger
logger = logging.getLogger(__name__)
#Set Environment Variables
ROOM_ID = os.getenv('ROOM_ID')
MONGODB_URI = os.getenv('MONGODB_URI')
USER_KEY = os.getenv('USER_KEY')
REDIS_URL = os.getenv('REDIS_URL')
base_url = "https://api.hipchat.com/v2/"
message_log_file = 'message_log.csv'
#get messages and add to csv
def get_messages():
uri = urlparse(REDIS_URL)
    r = redis.StrictRedis(host = uri.hostname, port = uri.port, password = uri.password)
last_run = r.get('last_run')
if last_run:
pass
else:
last_run = 1
now = int(time.time())
f = open(message_log_file, 'wb')
writer = csv.writer(f)
writer.writerow( ('user_id', 'user_name', 'message'))
f.close()
url = base_url + 'room/' + str(ROOM_ID) + '/history'
payload = {
'auth_token': USER_KEY,
'max-results': 999,
'reverse': 'true',
'date': now,
'include_deleted': 'false',
'end-date': last_run
}
get_room_history = requests.get(url, params = payload)
rate_check(get_room_history.headers)
write_to_log(get_room_history.text)
response = json.loads(get_room_history.text)
while 'next' in response['links']:
url = response['links']['next']
payload = {'auth_token': USER_KEY}
get_room_history = requests.get(url, params = payload)
rate_check(get_room_history.headers)
response = get_room_history.json()
write_to_log(get_room_history.text)
logger.debug('iteration ' + str(response['startIndex']))
time.sleep(.1)
r.set('last_run', now)
def write_to_log(response):
message_log = open(message_log_file, 'ab')
log = json.loads(response)
for item in log['items']:
if 'id' in item['from']:
user_id = item['from']['id']
name = item['from']['name']
name = name.encode('utf-8').strip()
message = item['message']
if message:
message = message.encode('utf-8').strip()
else:
message = ''
messages = open(message_log_file, 'ab')
unicodewriter = csv.writer(messages)
unicodewriter.writerow((user_id, name, message))
messages.close()
def update_trainer():
client = MongoClient(MONGODB_URI)
db = client.get_default_database()
collection = client.messages
posts = db.posts
user_ids = []
#https://api.mongodb.com/python/current/tutorial.html
with open(message_log_file, 'rb') as message_file:
messagereader = csv.DictReader(message_file)
for row in messagereader:
user_id = row['user_id']
if user_id not in user_ids:
user_ids.append(user_id)
for user_id in iter(user_ids):
logger.debug('starting user_id: ' + str(user_id))
messages = ''
entry = posts.find_one({"_id": user_id})
if entry:
messages = entry['text']
messages = messages.encode('utf-8').strip()
message_file.seek(0)
for row in messagereader:
if row['user_id'] == str(user_id):
messages += '\n'
messages += row['message']
post = {
'author': 'imitationBot',
'text': messages,
'tags': ['messages', str(user_id)],
'date': datetime.datetime.utcnow(),
'_id': user_id
}
post_id = posts.update_one({'_id': user_id}, {'$set':post}, upsert = True)
logger.debug('inserted mongodb document ' + str(user_id))
logger.info('trainers updated')
os.remove(message_log_file)
if __name__ == '__main__':
get_messages()
update_trainer()
```
#### File: jimjudd/imitationbot/markov_gen.py
```python
import markovify
from pymongo import MongoClient
import logging
import os
import re
import nltk
import requests
logger = logging.getLogger(__name__)
MONGODB_URI = os.getenv('MONGODB_URI')
USER_KEY = os.getenv('USER_KEY')
class POSifiedText(markovify.Text):
def word_split(self, sentence):
words = re.split(self.word_split_pattern, sentence)
words = [ "::".join(tag) for tag in nltk.pos_tag(words) ]
return words
def word_join(self, words):
sentence = " ".join(word.split("::")[0] for word in words)
return sentence
def gen_markov(user_id):
client = MongoClient(MONGODB_URI)
db = client.get_default_database()
posts = db.posts
entry = posts.find_one({"_id": str(user_id)})
if entry:
corpus = entry['text']
text_model = markovify.Text(corpus)
return text_model.make_sentence()
else:
        logger.info('No corpus for user ' + str(user_id))
``` |
{
"source": "jimjyang/concepttordf",
"score": 3
} |
#### File: concepttordf/concepttordf/betydningsbeskrivelse.py
```python
from abc import ABC
from enum import Enum
class RelationToSource(Enum):
sitatFraKilde = "quoteFromSource"
basertPåKilde = "basedOnSource"
egendefinert = "noSource"
class Betydningsbeskrivelse(ABC):
def __init__(self, b: dict = None):
if b is not None:
if 'text' in b:
self.text = b['text']
if 'remark' in b:
self.remark = b['remark']
if 'scope' in b:
self.scope = b['scope']
if 'relationtosource' in b:
self.relationtosource = b['relationtosource']
if 'source' in b:
self.source = b['source']
if 'modified' in b:
self.modified = b['modified']
@property
def type(self):
return self._type
@type.setter
def type(self, type):
self._type = type
@property
def text(self) -> dict:
return self._text
@text.setter
def text(self, text: dict):
self._text = text
@property
def remark(self) -> dict:
return self._remark
@remark.setter
def remark(self, remark: dict):
self._remark = remark
@property
def scope(self) -> dict:
return self._scope
@scope.setter
def scope(self, scope: dict):
self._scope = scope
@property
def relationtosource(self) -> str:
return self._relationtosource
@relationtosource.setter
def relationtosource(self, relationtosource: str):
self._relationtosource = relationtosource
@property
def source(self) -> dict:
return self._source
@source.setter
def source(self, source: dict):
self._source = source
@property
def modified(self) -> dict:
return self._modified
@modified.setter
def modified(self, modified: dict):
self._modified = modified
```
#### File: concepttordf/concepttordf/contact.py
```python
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
DCT = Namespace('http://purl.org/dc/terms/')
SKOSXL = Namespace('http://www.w3.org/2008/05/skos-xl#')
VCARD = Namespace('http://www.w3.org/2006/vcard/ns#')
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
SKOSNO = Namespace('http://difi.no/skosno#')
class Contact:
"""A class representing a contact """
def __init__(self, contact: dict = None):
self._g = Graph()
if contact is not None:
if 'name' in contact:
self._name = contact['name']
if 'email' in contact:
self._email = contact['email']
if 'url' in contact:
self._url = contact['url']
if 'telephone' in contact:
self._telephone = contact['telephone']
@property
def name(self) -> dict:
return self._name
@name.setter
def name(self, name: dict):
self._name = name
@property
def email(self) -> str:
return self._email
@email.setter
def email(self, email: str):
self._email = email
@property
def telephone(self) -> str:
return self._telephone
@telephone.setter
def telephone(self, telephone: str):
self._telephone = telephone
@property
def url(self) -> str:
return self._url
@url.setter
def url(self, url: str):
self._url = url
def to_graph(self) -> Graph:
self._add_contact_to_graph()
return self._g
def to_rdf(self, format='turtle') -> str:
"""Maps the contact to rdf and returns a serialization
as a string according to format"""
return self.to_graph().serialize(format=format, encoding='utf-8')
# -----
def _add_contact_to_graph(self):
"""Adds the concept to the Graph _g"""
self._g.bind('vcard', VCARD)
if hasattr(self, 'identifier'):
_self = URIRef(self.identifier)
else:
_self = BNode()
self._g.add((_self, RDF.type, VCARD.Organization))
# name
if hasattr(self, 'name'):
for key in self.name:
self._g.add((_self, VCARD.hasOrganizationName,
Literal(self.name[key], lang=key)))
# email
if hasattr(self, 'email'):
self._g.add((_self, VCARD.hasEmail,
URIRef('mailto:' + self.email)))
# telephone
if hasattr(self, 'telephone'):
self._g.add((_self, VCARD.hasTelephone,
URIRef('tel:' + self.telephone)))
# url
if hasattr(self, 'url'):
self._g.add((_self, VCARD.hasURL,
URIRef(self.url)))
``` |
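A small usage sketch for the `Contact` class above, assuming the module is importable as `concepttordf.contact`; the organisation details are placeholders.
```python
from concepttordf.contact import Contact

contact = Contact({
    'name': {'en': 'Example Organization', 'nb': 'Eksempelorganisasjon'},
    'email': 'post@example.org',
    'telephone': '+4799999999',
    'url': 'https://example.org',
})
contact.identifier = 'https://example.org/contact'
print(contact.to_rdf().decode('utf-8'))
```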
{
"source": "jimkiiru/james-kiiru-bc17-week1",
"score": 3
} |
#### File: james-kiiru-bc17-week1/day2/OOP.py
```python
class Account(object):
def __init__(self, holder, number, balance,credit_line=1500):
self.Holder = holder
self.Number = number
self.Balance = balance
self.CreditLine = credit_line
def deposit(self, amount):
        self.Balance += amount
def withdraw(self, amount):
if(self.Balance - amount < -self.CreditLine):
# coverage insufficient
return False
else:
self.Balance -= amount
return True
def balance(self):
return self.Balance
def transfer(self, target, amount):
if(self.Balance - amount < -self.CreditLine):
# coverage insufficient
return False
else:
self.Balance -= amount
target.Balance += amount
return True
```
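An illustrative use of the `Account` class above, assuming it is importable (e.g. `from OOP import Account`).
```python
from OOP import Account

a = Account('Alice', 1001, 100)
b = Account('Bob', 1002, 50)
a.deposit(25)
a.withdraw(40)
a.transfer(b, 30)
print(a.balance())
print(b.balance())
```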
#### File: james-kiiru-bc17-week1/day3/HttpAndWeb.py
```python
import requests
def Get(url, PostId):
try:
isinstance(int(PostId), int)
if int(PostId) <= 100 and int(PostId) > 0:
r = requests.get(url + PostId)
return r
else:
print("Number must be between 1 and 100")
except ValueError as err:
raise(err)
return "No Results"
def Post(PostUrl, title, body, userId=11):
    Postdata = {
        'title': title,
        'body': body,
        'userId': userId
    }
    request = requests.post(PostUrl, data=Postdata)
    return request
def main():
print("Python HTTP API command line app %s\n" %("-"*31))
print("Simple Python HTTP API command line app")
url = "https://jsonplaceholder.typicode.com/posts/"
PostId = input("Enter a number between 1 and 100: ")
get = Get(url,PostId)
print("GET Response data\n\t%s\n%s\n\tStatus code\n\t%s\n%s\n\tHeaders\n\t%s\n%s" %
("-"*17,get.text, "-"*11, get.status_code,"-"*7, get.headers))
title = input("Enter a title for your post: ")
body = input("Enter a body for your post: ")
post = Post(url,title,body)
print("\tPOST Response data\n\t%s\n%s\n\tStatus code\n\t%s\n%s\n\tHeaders\n\t%s\n%s" %
("-"*17,post.text, "-"*11, post.status_code,"-"*7, post.headers))
if __name__ == '__main__':
main()
```
#### File: james-kiiru-bc17-week1/day4/BinarySearch.py
```python
class BinarySearch(list):
def __init__(self, a, b):
self.a = a
self.b = b
    def search(self, itemToSearch, lowerItemIndex=0, higherItemIndex=None, i=0):
        # Recursive binary search over the index range [lowerItemIndex, higherItemIndex]
        if higherItemIndex is None:
            higherItemIndex = self.a - 1
        if lowerItemIndex > higherItemIndex:
            # item not found
            return None
        midpoint = lowerItemIndex + (higherItemIndex - lowerItemIndex) // 2
        i = i + 1
        if itemToSearch == midpoint:
            # return the number of comparisons mapped to the matching index
            return {i: midpoint}
        elif itemToSearch > midpoint:
            # item is in the upper half of the list
            return self.search(itemToSearch, midpoint + 1, higherItemIndex, i)
        else:
            # item is in the lower half of the list
            return self.search(itemToSearch, lowerItemIndex, midpoint - 1, i)
``` |
{
"source": "JimKing100/Decision_Tree",
"score": 4
} |
#### File: Decision_Tree/final/decision_tree1.py
```python
import numpy as np
# A decision tree node
class Node:
def __init__(self, gini, num_samples, num_samples_per_class, pred_class):
self.gini = gini
self.num_samples = num_samples
self.num_samples_per_class = num_samples_per_class
self.pred_class = pred_class
self.feature_index = 0
self.threshold = 0
self.left = None
self.right = None
# Print a decision tree
def print_tree(self, feature_names, class_names, show_details):
lines, _, _, _ = self._print_aux(
feature_names, class_names, show_details, root=True
)
for line in lines:
print(line)
# See https://stackoverflow.com/a/54074933/1143396 for similar code.
def _print_aux(self, feature_names, class_names, show_details, root=False):
is_leaf = not self.right
if is_leaf:
lines = [class_names[self.pred_class]]
else:
lines = [
"{} < {:.2f}".format(feature_names[self.feature_index], self.threshold)
]
if show_details:
lines += [
"gini = {:.2f}".format(self.gini),
"samples = {}".format(self.num_samples),
str(self.num_samples_per_class),
]
width = max(len(line) for line in lines)
height = len(lines)
if is_leaf:
lines = ["║ {:^{width}} ║".format(line, width=width) for line in lines]
lines.insert(0, "╔" + "═" * (width + 2) + "╗")
lines.append("╚" + "═" * (width + 2) + "╝")
else:
lines = ["│ {:^{width}} │".format(line, width=width) for line in lines]
lines.insert(0, "┌" + "─" * (width + 2) + "┐")
lines.append("└" + "─" * (width + 2) + "┘")
lines[-2] = "┤" + lines[-2][1:-1] + "├"
width += 4 # for padding
if is_leaf:
middle = width // 2
lines[0] = lines[0][:middle] + "╧" + lines[0][middle + 1:]
return lines, width, height, middle
# If not a leaf, must have two children.
left, n, p, x = self.left._print_aux(feature_names, class_names, show_details)
right, m, q, y = self.right._print_aux(feature_names, class_names, show_details)
top_lines = [n * " " + line + m * " " for line in lines[:-2]]
# fmt: off
middle_line = x * " " + "┌" + (n - x - 1) * "─" + lines[-2] + y * "─" + "┐" + (m - y - 1) * " "
bottom_line = x * " " + "│" + (n - x - 1) * " " + lines[-1] + y * " " + "│" + (m - y - 1) * " "
# fmt: on
if p < q:
left += [n * " "] * (q - p)
elif q < p:
right += [m * " "] * (p - q)
zipped_lines = zip(left, right)
lines = (
top_lines
+ [middle_line, bottom_line]
+ [a + width * " " + b for a, b in zipped_lines]
)
middle = n + width // 2
if not root:
lines[0] = lines[0][:middle] + "┴" + lines[0][middle + 1:]
return lines, n + m + width, max(p, q) + 2 + len(top_lines), middle
class DTreeClassifier:
def __init__(self, max_depth=None):
self.max_depth = max_depth
# Fit the model using data in a dataframe
def fit(self, X, y):
self.n_classes_ = len(set(y))
self.n_features_ = X.shape[1]
X = X.to_numpy()
self.tree_ = self._build_tree(X, y)
return self.tree_
# Make a prediction
def predict(self, X):
X = X.to_numpy()
return [self._predict(inputs) for inputs in X]
# Calculate the accuracy
def accuracy(self, actual, predicted):
act = actual.to_numpy()
correct = 0
for i in range(len(act)):
if act[i] == predicted[i]:
correct += 1
return correct / float(len(act)) * 100
# Print the decision tree
def print_tree(self, feature_names, class_names, show_details=True):
self.tree_.print_tree(feature_names, class_names, show_details)
# Compute the gini
def _gini(self, y):
size = y.size
return 1.0 - sum((np.sum(y == c) / size) ** 2 for c in range(self.n_classes_))
# Find the best split
def _best_split(self, X, y):
size = y.size
if size <= 1:
return None, None
# Count of each class in the current node
num_parent = [np.sum(y == c) for c in range(self.n_classes_)]
# Gini of current node.
best_gini = 1.0 - sum((n / size) ** 2 for n in num_parent)
best_idx, best_thr = None, None
# Loop through all features.
for idx in range(self.n_features_):
# Sort data along selected feature.
thresholds, classes = zip(*sorted(zip(X[:, idx], y)))
num_left = [0] * self.n_classes_
num_right = num_parent.copy()
for i in range(1, size):
c = int(classes[i - 1])
num_left[c] += 1
num_right[c] -= 1
gini_left = 1.0 - sum(
(num_left[x] / i) ** 2 for x in range(self.n_classes_)
)
gini_right = 1.0 - sum(
(num_right[x] / (size - i)) ** 2 for x in range(self.n_classes_)
)
# The gini of a split is the weighted average of the gini
# impurity of the children.
gini = (i * gini_left + (size - i) * gini_right) / size
# Don't split identical values
if thresholds[i] == thresholds[i - 1]:
continue
if gini < best_gini:
best_gini = gini
best_idx = idx
best_thr = (thresholds[i] + thresholds[i - 1]) / 2
return best_idx, best_thr
# Build the decision tree recursively finding the best split
def _build_tree(self, X, y, depth=0):
# Population for each class in current node.
# The predicted class is the one with the largest population
num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes_)]
predicted_class = np.argmax(num_samples_per_class)
node = Node(gini=self._gini(y),
num_samples=y.size,
num_samples_per_class=num_samples_per_class,
pred_class=predicted_class
)
# Split recursively until maximum depth is reached.
if depth < self.max_depth:
idx, thr = self._best_split(X, y)
if idx is not None:
indices_left = X[:, idx] < thr
X_left, y_left = X[indices_left], y[indices_left]
X_right, y_right = X[~indices_left], y[~indices_left]
node.feature_index = idx
node.threshold = thr
node.left = self._build_tree(X_left, y_left, depth + 1)
node.right = self._build_tree(X_right, y_right, depth + 1)
return node
# Predict class for a single sample
def _predict(self, inputs):
node = self.tree_
while node.left:
if inputs[node.feature_index] < node.threshold:
node = node.left
else:
node = node.right
return node.pred_class
```
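A minimal usage sketch for `DTreeClassifier`. The data is hypothetical, and it assumes the file is importable as `decision_tree1`, with `X` a pandas DataFrame and `y` a NumPy array of integer class labels.
```python
import numpy as np
import pandas as pd
from decision_tree1 import DTreeClassifier

X = pd.DataFrame({'petal_length': [1.4, 4.7, 6.0, 1.3, 5.1],
                  'petal_width': [0.2, 1.4, 2.5, 0.2, 1.9]})
y = np.array([0, 1, 2, 0, 2])

clf = DTreeClassifier(max_depth=2)
clf.fit(X, y)
preds = clf.predict(X)
print(preds)
print(clf.accuracy(pd.Series(y), preds))
clf.print_tree(list(X.columns), ['setosa', 'versicolor', 'virginica'])
```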
#### File: Decision_Tree/final/decision_tree.py
```python
import numpy as np
import sys
# A decision tree node
class Node:
class_counter = 0
def __init__(self, gini, num_samples, num_samples_per_class, pred_class):
self.gini = gini
self.num_samples = num_samples
self.num_samples_per_class = num_samples_per_class
self.pred_class = pred_class
self.feature_index = 0
self.threshold = 0
self.index = Node.class_counter
self.left = None
self.right = None
Node.class_counter += 1
class DTreeClassifier:
def __init__(self, max_depth=None):
self.max_depth = max_depth
# Fit the model using data in a dataframe
def fit(self, X, y):
self.n_classes_ = len(set(y))
self.n_features_ = X.shape[1]
X = X.to_numpy()
self.tree_ = self._build_tree(X, y)
return self.tree_
# Make a prediction
def predict(self, X):
X = X.to_numpy()
return [self._predict(inputs) for inputs in X]
# Calculate the accuracy
def accuracy(self, actual, predicted):
act = actual.to_numpy()
correct = 0
for i in range(len(act)):
if act[i] == predicted[i]:
correct += 1
return correct / float(len(act)) * 100
# Print a decision tree
def print_tree(self, node, feature_names, depth=0):
self.feature_names = feature_names
indent = ' ' * (depth * 5)
if node is not None:
is_leaf = not node.right
if is_leaf:
print(indent, node.pred_class)
else:
print(indent, feature_names[node.feature_index], '<', node.threshold)
print(indent, 'gini=', node.gini)
print(indent, 'samples=', node.num_samples)
print(indent, 'samples/class', node.num_samples_per_class)
print(' ')
self.print_tree(node.left, feature_names, depth + 1)
self.print_tree(node.right, feature_names, depth + 1)
# Print a dot decision tree
def print_tree_dot(self, node, feature_names, class_names):
dot_file = open('data/output.dot', 'w')
self._print_tree(node, feature_names, class_names, dot_file)
print('}', file=dot_file)
dot_file.close()
# Traverse the tree breadth-first, printing dot code for each node
def _print_tree(self, node, feature_names, class_names, dot_file, depth=0):
output_str = ''
if depth == 0:
print('digraph Tree {', file=dot_file)
print('node [shape=box] ;', file=dot_file)
self.feature_names = feature_names
if node is not None:
is_leaf = not node.right
if is_leaf:
output_str = str(node.index) + ' ' + \
'[label=\"' + \
str(class_names[node.pred_class]) + '\\'
else:
output_str = str(node.index) + ' ' + \
'[label=\"' + \
feature_names[node.feature_index] + ' < ' + str(node.threshold) + '\\'
output_str += 'ngini = ' + str(node.gini) + '\\' + \
'nsamples = ' + str(node.num_samples) + '\\' + \
'nvalue = ' + str(node.num_samples_per_class) + \
'\"] ;'
print(output_str, file=dot_file)
if is_leaf is False:
print(str(node.index) + ' -> ' + str(node.left.index), file=dot_file)
self._print_tree(node.left, feature_names, class_names, dot_file, depth + 1)
if is_leaf is False:
print(str(node.index) + ' -> ' + str(node.right.index), file=dot_file)
self._print_tree(node.right, feature_names, class_names, dot_file, depth + 1)
# Compute the gini
def _gini(self, y):
size = y.size
return 1.0 - sum((np.sum(y == c) / size) ** 2 for c in range(self.n_classes_))
# Find the best split
def _best_split(self, X, y):
size = y.size
if size <= 1:
return None, None
# Count of each class in the current node
num_parent = [np.sum(y == c) for c in range(self.n_classes_)]
# Gini of current node.
best_gini = 1.0 - sum((n / size) ** 2 for n in num_parent)
best_idx, best_thr = None, None
# Loop through all features.
for idx in range(self.n_features_):
# Sort data along selected feature.
thresholds, classes = zip(*sorted(zip(X[:, idx], y)))
num_left = [0] * self.n_classes_
num_right = num_parent.copy()
for i in range(1, size):
c = int(classes[i - 1])
num_left[c] += 1
num_right[c] -= 1
gini_left = 1.0 - sum(
(num_left[x] / i) ** 2 for x in range(self.n_classes_)
)
gini_right = 1.0 - sum(
(num_right[x] / (size - i)) ** 2 for x in range(self.n_classes_)
)
# The gini of a split is the weighted average of the gini
# impurity of the children.
gini = (i * gini_left + (size - i) * gini_right) / size
# Don't split identical values
if thresholds[i] == thresholds[i - 1]:
continue
if gini < best_gini:
best_gini = gini
best_idx = idx
best_thr = (thresholds[i] + thresholds[i - 1]) / 2
return best_idx, best_thr
# Build the decision tree recursively finding the best split
def _build_tree(self, X, y, depth=0):
# Population for each class in current node.
# The predicted class is the one with the largest population
num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes_)]
predicted_class = np.argmax(num_samples_per_class)
node = Node(gini=self._gini(y),
num_samples=y.size,
num_samples_per_class=num_samples_per_class,
pred_class=predicted_class
)
# Split recursively until maximum depth is reached.
if depth < self.max_depth:
idx, thr = self._best_split(X, y)
if idx is not None:
indices_left = X[:, idx] < thr
X_left, y_left = X[indices_left], y[indices_left]
X_right, y_right = X[~indices_left], y[~indices_left]
node.feature_index = idx
node.threshold = thr
node.left = self._build_tree(X_left, y_left, depth + 1)
node.right = self._build_tree(X_right, y_right, depth + 1)
return node
# Predict class for a single sample
def _predict(self, inputs):
node = self.tree_
while node.left:
if inputs[node.feature_index] < node.threshold:
node = node.left
else:
node = node.right
return node.pred_class
```
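This variant emits Graphviz dot output instead of a box-drawn tree; a hedged usage sketch follows (same data assumptions as above, plus a writable `data/` directory for the dot file).
```python
import numpy as np
import pandas as pd
from decision_tree import DTreeClassifier

X = pd.DataFrame({'x1': [1.0, 2.0, 8.0, 9.0], 'x2': [1.5, 1.0, 3.0, 3.5]})
y = np.array([0, 0, 1, 1])

clf = DTreeClassifier(max_depth=2)
root = clf.fit(X, y)
clf.print_tree(root, list(X.columns))
clf.print_tree_dot(root, list(X.columns), ['class_0', 'class_1'])
# Render the dot file with Graphviz, e.g.: dot -Tpng data/output.dot -o data/output.png
```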
#### File: Decision_Tree/src/tester2.py
```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder
class DTree:
def __init__(self, max_depth, min_size):
self.max_depth = max_depth
self.min_size = min_size
self.root = None
# Calculate the Gini index for a split dataset
def __gini_index(self, groups, classes):
# count all samples at split point
n_instances = float(sum([len(group) for group in groups]))
# Sum weighted Gini index for each group
gini = 0.0
for group in groups:
size = float(len(group))
# Avoid divide by zero
if size == 0:
continue
score = 0.0
# Score the group based on the score for each class
for class_val in classes:
p = [row[-1] for row in group].count(class_val) / size
score += (p * p)
# Weight the group score by its relative size
gini += (1.0 - score) * (size / n_instances)
return gini
# Split a dataset based on an attribute and an attribute value
def __test_split(self, index, value, data):
left = []
right = []
for row in data:
if row[index] < value:
left.append(row)
else:
right.append(row)
return left, right
# Create a terminal node value
def __to_terminal(self, group):
outcomes = [row[-1] for row in group]
return max(set(outcomes), key=outcomes.count)
# Select the best split point for a dataset
def __get_split(self, data):
class_values = list(set(row[-1] for row in data))
b_index = 999
b_value = 999
b_score = 999
b_groups = None
for index in range(len(data[0])-1):
for row in data:
groups = self.__test_split(index, row[index], data)
gini = self.__gini_index(groups, class_values)
if gini < b_score:
b_index = index
b_value = row[index]
b_score = gini
b_groups = groups
return{'index': b_index, 'value': b_value, 'groups': b_groups}
# Create child splits for a node or make terminal
def __split(self, node, max_depth, min_size, depth):
left, right = node['groups']
del(node['groups'])
# Check for a no split
if not left or not right:
node['left'] = node['right'] = self.__to_terminal(left + right)
return
# Check for max depth
if depth >= max_depth:
node['left'], node['right'] = self.__to_terminal(left), self.__to_terminal(right)
return
# Process left child
if len(left) <= min_size:
node['left'] = self.__to_terminal(left)
else:
node['left'] = self.__get_split(left)
self.__split(node['left'], self.max_depth, self.min_size, depth + 1)
# Process right child
if len(right) <= min_size:
node['right'] = self.__to_terminal(right)
else:
node['right'] = self.__get_split(right)
self.__split(node['right'], self.max_depth, self.min_size, depth + 1)
# Print the decision tree
def print_tree(self, node, depth=0):
if isinstance(node, dict):
print('%s[X%d < %.3f]' % ((depth*' ', (node['index']+1), node['value'])))
self.print_tree(node['left'], depth + 1)
self.print_tree(node['right'], depth + 1)
else:
print('%s[%s]' % ((depth*' ', node)))
# Fit the training data, build the decision tree
def fit(self, X, y):
train = pd.concat([X, y], axis=1)
train = train.to_numpy()
self.train = train
self.root = self.__get_split(train)
self.__split(self.root, self.max_depth, self.min_size, 1)
return self.root
def predict_item(self, node, row):
self.node = node
self.row = row
if row[node['index']] < node['value']:
if isinstance(node['left'], dict):
return self.predict_item(node['left'], row)
else:
return node['left']
else:
if isinstance(node['right'], dict):
return self.predict_item(node['right'], row)
else:
return node['right']
def predict(self, X):
test = X.to_numpy()
self.test = test
predictions = list()
for row in self.test:
prediction = self.predict_item(self.root, row)
predictions.append(prediction)
return(predictions)
iris_data = pd.read_csv('https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv')
labelencoder = LabelEncoder()
iris_data['variety'] = labelencoder.fit_transform(iris_data['variety'])
train1 = iris_data.to_numpy()
# print(train1)
train = pd.read_csv('data/test.csv')
target = 'Y'
features = train.columns.drop(target)
X = train[features]
y = train[target]
print(train)
"""
train = [[2.771244718, 1.784783929, 0],
[1.728571309, 1.169761413, 0],
[3.678319846, 2.81281357, 0],
[3.961043357, 2.61995032, 0],
[2.999208922, 2.209014212, 0],
[7.497545867, 3.162953546, 1],
[9.00220326, 3.339047188, 1],
[7.444542326, 0.476683375, 1],
[10.12493903, 3.234550982, 1],
[6.642287351, 3.319983761, 1]]
"""
tree = DTree(max_depth=2, min_size=1)
clf = tree.fit(X, y)
tree.print_tree(clf)
test = train
print(test)
y_pred = tree.predict(test)
print(y_pred)
"""
# Predict with a stump
train = train.to_numpy()
stump = {'index': 0, 'right': 1, 'value': 6.642287351, 'left': 0}
for row in train:
prediction = tree.predict(stump, row)
print('Expected=%d, Got=%d' % (row[-1], prediction))
"""
"""
train = iris_data
target = 4
features = train.columns.drop(target)
X = train[features]
y = train[target]
print(y)
tree = DTree(max_depth=5, min_size=10)
clf = tree.fit(X, y)
"""
``` |
{
"source": "JimKing100/ds_helper",
"score": 3
} |
#### File: JimKing100/ds_helper/test_ds_helper.py
```python
import pandas as pd
from ds_helper_data import df_utils
def test_nulls():
df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/DS-Unit-3-Sprint-1-Software-Engineering/master/module1-python-modules-packages-and-environments/drink_test.csv')
assert df_utils.nulls(df) is None
def test_date_conv():
df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/DS-Unit-3-Sprint-1-Software-Engineering/master/module1-python-modules-packages-and-environments/MarinSalesJanJune2019.csv')
df_utils.date_conv(df, 'Listing Date')
assert 'Listing Date_year' in df.columns
def test_state_conv():
assert df_utils.state_conv('California') == "CA"
``` |
{
"source": "JimKing100/DS-Unit-3-Sprint-1-Software-Engineering",
"score": 4
} |
#### File: DS-Unit-3-Sprint-1-Software-Engineering/Sprint_Challenge/acme.py
```python
import random
# Create the class
class Product:
# Define the constructor with the instance attributes
def __init__(self, name, price=10, weight=20, flammability=0.5):
self.id = random.randint(1000000, 9999999)
self.name = name
self.price = price
self.weight = weight
self.flammability = flammability
# Create the greets method
def stealability(self):
price_weight = self.price / self.weight
if price_weight < 0.5:
print('Not so stealable')
else:
if (price_weight >= 0.5 and price_weight < 1.0):
print('Kinda stealable')
else:
print('Very stealable')
def explode(self):
flame_weight = self.flammability * self.weight
if flame_weight < 10:
print('...fizzle')
else:
if (flame_weight >= 10 and flame_weight < 50):
print('...boom!')
else:
print('...BABOOM!')
class BoxingGlove(Product):
# Define the constructor with the instance attributes
def __init__(self, name, price=10, weight=10, flammability=0.5):
super().__init__(name, price, weight, flammability)
def explode(self):
print("...it's a glove.")
def punch(self):
if self.weight < 5:
print('That tickles')
else:
if (self.weight >= 5 and self.weight < 15):
print('Hey that hurt!')
else:
print('OUCH!')
``` |
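An illustrative run of the classes above, assuming the module is importable as `acme`.
```python
from acme import Product, BoxingGlove

prod = Product('Anvil', price=50, weight=40, flammability=0.8)
print(prod.id)          # random 7-digit identifier
prod.stealability()     # price/weight = 1.25 -> "Very stealable"
prod.explode()          # flammability * weight = 32 -> "...boom!"

glove = BoxingGlove('Punchy the Third', weight=12)
glove.explode()         # always "...it's a glove."
glove.punch()           # weight 12 -> "Hey that hurt!"
```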
{
"source": "JimKing100/PrivacyContest",
"score": 3
} |
#### File: PrivacyContest/src/simulate_row.py
```python
import random
import globals
# Create two concurrent lists representing a split row
# - row is the front end of the row with sec_estimate faredecode_dict
# - row1 is the back end of the row
def simulate_row(epsilon, taxi_id, spd, cp, fr):
row = {}
row1 = {}
# Create row = epsilon, taxi_id, shift, company_id, pcs, dca, payment_type, sec_estimate
row['epsilon'] = epsilon
row['taxi_id'] = taxi_id
row['shift'] = int(str(spd)[1:3])
row['company_id'] = int(str(cp)[1:4])
pca = int(str(spd)[3:5])
if pca == 0:
pca = -1
row['pickup_community_area'] = pca
dca = int(str(spd)[5:7])
if dca == 0:
dca = -1
row['dropoff_community_area'] = dca
pay = int(str(cp)[4:5])
if pay == 9:
pay = -1
row['payment_type'] = pay
pca_dca = str(spd)[3:7]
sec_estimate = globals.prox_dict[pca_dca]
row['sec_estimate'] = sec_estimate
# Create row1 = fare, tips, trip_total, trip_seconds, trip_miles
fare_range = int(str(fr)[0:2])
tips_range = int(str(fr)[2:4])
if fare_range == 21:
v = 50
else:
v = globals.faredecode_dict[fare_range]
if v == 0:
value = 0
elif v == 50:
value = random.randrange(50, 100)
else:
value = random.randrange(v - 5, v)
row1['fare'] = value
fare = value
if tips_range == 21:
v = 20
else:
v = globals.tipsdecode_dict[tips_range]
if v == 0:
value = 0
elif v == 20:
value = random.randrange(20, 50)
else:
value = random.randrange(v - 2, v)
row1['tips'] = value
tips = value
row1['trip_total'] = fare + tips
sec_range = int(str(fr)[4:6])
miles_range = int(str(fr)[6:8])
if sec_range == 61:
v = 5000
else:
v = globals.secdecode_dict[sec_range]
if v == 0:
value = 0
elif v == 5000:
value = random.randrange(5000, 10000)
else:
value = random.randrange(v - 100, v)
row1['trip_seconds'] = value
if miles_range == 21:
v = 20
else:
v = globals.milesdecode_dict[miles_range]
if v == 0:
value = 0
elif v == 20:
value = random.randrange(20, 50)
else:
value = random.randrange(v - 2, v)
row1['trip_miles'] = value
return row, row1
``` |
{
"source": "JimKing100/sprint_aq",
"score": 2
} |
#### File: sprint_aq/TEST/app.py
```python
from decouple import config
from flask import Flask, render_template
from TEST.models import DB, Record
# import requests
import openaq
def create_app():
# Create Flask web server, makes the application
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
# Routes determine location
@app.route("/")
def home():
DB.drop_all()
DB.create_all()
api = openaq.OpenAQ()
status, body = api.measurements(city='Los Angeles', parameter='pm25')
for i in range(0, 100):
date1 = body['results'][i]['date']['utc']
value1 = body['results'][i]['value']
db_record = Record(id=i, datetime=date1, value=value1)
DB.session.add(db_record)
DB.session.commit()
records = Record.query.filter(Record.value > 10)
return render_template('home.html', title=date1, records=records)
return app
``` |
{
"source": "JimKing100/techsearch",
"score": 3
} |
#### File: techsearch/TECHSEARCH/app.py
```python
from flask import Flask, request, render_template
import pandas as pd
import re
import string
def create_app():
# Create Flask web server, makes the application
app = Flask(__name__)
# Routes determine location
@app.route("/")
def home():
return render_template('index.html')
@app.route("/search", methods=['GET'])
def input():
return render_template('search.html')
@app.route("/output", methods=['POST'])
def output():
df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/techsearch/master/data/scrape_results1.csv')
df = df.drop(df.columns[0], axis=1)
title = request.values['title']
city = request.values['city']
result_df = df.loc[(df['job'] == title) & (df['city'] == city)]
r_title = result_df['job'].iloc[0]
r_city = result_df['city'].iloc[0]
r_count = result_df['counts'].iloc[0]
r_lsalary = result_df['low_salary'].iloc[0]
r_hsalary = result_df['high_salary'].iloc[0]
r_skills = re.sub('['+string.punctuation+']', '', result_df['skills'].iloc[0]).split()
return render_template('response.html',
title='Search Results',
r_title=r_title,
r_city=r_city,
r_count=r_count,
r_lsalary=r_lsalary,
r_hsalary=r_hsalary,
r_skills=r_skills
)
@app.route("/about")
def about():
return render_template('about.html')
return app
``` |
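A hedged local-run sketch; it assumes the package layout makes the factory importable as `TECHSEARCH.app` and that the templates referenced above are present.
```python
from TECHSEARCH.app import create_app

app = create_app()

if __name__ == '__main__':
    app.run(debug=True)
```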
{
"source": "jimklo/sublime-rst-completion",
"score": 3
} |
#### File: jimklo/sublime-rst-completion/helpers.py
```python
import re
from sublime import Region
import sublime_plugin
class BaseBlockCommand(sublime_plugin.TextCommand):
def _get_row_text(self, row):
if row < 0 or row > self.view.rowcol(self.view.size())[0]:
raise RuntimeError('Cannot find table bounds.')
point = self.view.text_point(row, 0)
region = self.view.line(point)
text = self.view.substr(region)
return text
def get_cursor_position(self):
return self.view.rowcol(self.view.sel()[0].begin())
def get_block_bounds(self):
"""given the cursor position as started point,
returns the limits and indentation"""
row, col = self.get_cursor_position()
upper = lower = row
try:
while self._get_row_text(upper - 1).strip():
upper -= 1
except Exception as e:
print(e)
pass
else:
upper += 1
try:
while self._get_row_text(lower + 1).strip():
lower += 1
except Exception as e:
print(e)
pass
else:
lower -= 1
block_region = Region(self.view.text_point(upper - 1, 0),
self.view.text_point(lower + 2, 0))
lines = [self.view.substr(region) for region in self.view.lines(block_region)]
indent = re.match('^(\s*).*$', self._get_row_text(upper - 1)).group(1)
return block_region, lines, indent
``` |
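A sketch of how a plugin command might build on `BaseBlockCommand`; the command name and behaviour are illustrative, and depending on the Sublime Text version the import may need to be relative (`from .helpers import BaseBlockCommand`).
```python
from helpers import BaseBlockCommand


class PrintBlockCommand(BaseBlockCommand):
    """Example command: print the block under the cursor to the console."""

    def run(self, edit):
        region, lines, indent = self.get_block_bounds()
        print('\n'.join(lines))
```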
{
"source": "JimKnowler/profile-visualiser",
"score": 3
} |
#### File: JimKnowler/profile-visualiser/profileparser.py
```python
class ProfileParser:
def __init__(self, consumer):
self._consumer = consumer
def load_file(self, filename):
with open(filename, "r") as file:
for line_number, line in enumerate(file):
try:
line = line.rstrip()
self.parse(line)
except Exception as e:
print "exception while parsing line ", line_number
print ">> line: [", line, "]"
print ">>", e
raise e
def parse(self, line):
if line.startswith('#'):
# ignore comment lines
return
split_line = line.split(' ',1)
line_type = split_line[0]
if line_type == 'T':
split_line = line.split(' ',2)
thread_id = int(split_line[1])
thread_label = split_line[2]
self._consumer.on_thread(thread_id, thread_label)
elif line_type == 'F':
split_line = line.split(' ',3)
thread_id = int(split_line[1])
function_id = int(split_line[2])
function_label = split_line[3]
self._consumer.on_function(thread_id, function_id, function_label)
elif line_type == 'S':
split_line = line.split(' ',3)
thread_id = int(split_line[1])
function_id = int(split_line[2])
time = int(split_line[3])
self._consumer.on_sample_start(thread_id, function_id, time)
elif line_type == 'E':
split_line = line.split(' ',3)
thread_id = int(split_line[1])
function_id = int(split_line[2])
time = int(split_line[3])
self._consumer.on_sample_finish(thread_id, function_id, time)
elif line_type == 'V':
split_line = line.split(' ',3)
thread_id = int(split_line[1])
event_id = int(split_line[2])
event_label = split_line[3]
self._consumer.on_event(thread_id, event_id, event_label)
elif line_type == 'Y':
split_line = line.split(' ',3)
thread_id = int(split_line[1])
event_id = int(split_line[2])
time = int(split_line[3])
self._consumer.on_event_emit(thread_id, event_id, time)
elif line_type == 'C':
split_line = line.split(' ',2)
counter_id = int(split_line[1])
counter_label = split_line[2]
self._consumer.on_counter(counter_id, counter_label)
elif line_type == 'D':
split_line = line.split(' ',3)
counter_id = int(split_line[1])
time = int(split_line[2])
counter_value = int(split_line[3])
self._consumer.on_counter_value(counter_id, time, counter_value)
``` |
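A minimal consumer sketch showing the callback interface `ProfileParser` expects; the capture file name and the counting behaviour are illustrative.
```python
from profileparser import ProfileParser

class CountingConsumer(object):
    """Consumer that just counts parsed records of each kind."""
    def __init__(self):
        self.counts = {}

    def _bump(self, key):
        self.counts[key] = self.counts.get(key, 0) + 1

    def on_thread(self, thread_id, label):
        self._bump('thread')

    def on_function(self, thread_id, function_id, label):
        self._bump('function')

    def on_sample_start(self, thread_id, function_id, time):
        self._bump('sample_start')

    def on_sample_finish(self, thread_id, function_id, time):
        self._bump('sample_finish')

    def on_event(self, thread_id, event_id, label):
        self._bump('event')

    def on_event_emit(self, thread_id, event_id, time):
        self._bump('event_emit')

    def on_counter(self, counter_id, label):
        self._bump('counter')

    def on_counter_value(self, counter_id, time, value):
        self._bump('counter_value')

consumer = CountingConsumer()
ProfileParser(consumer).load_file('capture.profile')  # hypothetical capture file
print(consumer.counts)
```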