metadata (dict) | text (string, 60 to 3.49M chars)
---|---
{
"source": "jeffreytang09/cloud_storage",
"score": 3
}
|
#### File: cloud_storage/free_storage/_cloud_storage.py
```python
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, TextIO, Type
class CloudStorage(ABC):
def __init__(self, retry_limit: int, api_error: Type[IOError]) -> None:
self._drive = None
self._api_error = api_error
self.retry_limit = retry_limit
@abstractmethod
def connect(self) -> None:
pass
@abstractmethod
def is_connected(self) -> bool:
pass
@abstractmethod
def reconnect(self) -> None:
pass
@abstractmethod
def close(self) -> None:
pass
@abstractmethod
def list_files(self, remote_path: str) -> List[str]:
pass
@abstractmethod
def path_exists(self, remote_path: str) -> Optional[str]:
"""
        Checks whether the path exists; if so, returns the path identifier,
        otherwise returns an empty string.
"""
pass
@abstractmethod
def download_file(self, remote_path: str, local_path: Optional[str] = None) -> None:
pass
@abstractmethod
def read_file(self, remote_path: str) -> TextIO:
pass
@abstractmethod
def create_file(
self,
remote_path: str,
content: Optional[str] = None,
local_path: Optional[str] = None,
) -> None:
pass
@abstractmethod
def delete_file(self, remote_path: str):
pass
def _run_command(
self, command: Callable, params: Optional[Dict[str, Any]] = None
) -> Any:
result = None
success = False
for i in range(self.retry_limit):
try:
result = command() if params is None else command(**params)
success = True
except self._api_error:
continue
break
if success:
return result
else:
raise self._api_error
```
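A minimal sketch of a concrete backend, assuming a purely in-memory store; `InMemoryStorage` and its single simulated transient failure are hypothetical and exist only to show how `_run_command` retries calls that raise the configured `api_error`:

```python
# Hypothetical in-memory CloudStorage subclass (not part of the repository),
# used only to illustrate the retry behaviour of _run_command.
import io
from typing import List, Optional, TextIO


class InMemoryStorage(CloudStorage):
    def __init__(self) -> None:
        super().__init__(retry_limit=3, api_error=IOError)
        self._files = {}         # remote_path -> content
        self._failures_left = 1  # simulate one transient API failure

    def connect(self) -> None:
        self._drive = self._files

    def is_connected(self) -> bool:
        return self._drive is not None

    def reconnect(self) -> None:
        self.connect()

    def close(self) -> None:
        self._drive = None

    def list_files(self, remote_path: str) -> List[str]:
        return [p for p in self._files if p.startswith(remote_path)]

    def path_exists(self, remote_path: str) -> Optional[str]:
        return remote_path if remote_path in self._files else ''

    def download_file(self, remote_path: str, local_path: Optional[str] = None) -> None:
        pass  # nothing to copy in this in-memory sketch

    def read_file(self, remote_path: str) -> TextIO:
        def flaky_read():
            # Fail once so _run_command has to retry before succeeding.
            if self._failures_left > 0:
                self._failures_left -= 1
                raise IOError('transient API error')
            return io.StringIO(self._files[remote_path])
        return self._run_command(flaky_read)

    def create_file(self, remote_path: str, content: Optional[str] = None,
                    local_path: Optional[str] = None) -> None:
        self._files[remote_path] = content or ''

    def delete_file(self, remote_path: str):
        self._files.pop(remote_path, None)


storage = InMemoryStorage()
storage.connect()
storage.create_file('notes.txt', content='hello')
print(storage.read_file('notes.txt').read())  # first call fails once, the retry succeeds
```

Note that `_run_command` re-raises the configured error class once `retry_limit` attempts have all failed.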
|
{
"source": "jeffreythewang/scales",
"score": 2
}
|
#### File: scales/pool/singleton.py
```python
from .base import PoolSink
from ..async import AsyncResult
from ..constants import (ChannelState, SinkRole)
from ..sink import SinkProvider, SinkProperties
class SingletonPoolSink(PoolSink):
"""A SingletonPool maintains at most one underlying sink, and allows concurrent
requests through it. If the underlying sink fails, it's closed and reopened.
This sink is intended to be used for transports that can handle concurrent
requests, such as multiplexing transports.
"""
def __init__(self, sink_provider, sink_properties, global_properties):
super(SingletonPoolSink, self).__init__(sink_provider, global_properties)
endpoint = global_properties[SinkProperties.Endpoint]
endpoint_source = '%s:%d' % (endpoint.host, endpoint.port)
self.endpoint = endpoint_source
self._ref_count = 0
def Open(self):
"""Open the underlying sink and increase the ref count."""
self._ref_count += 1
if self._ref_count > 1:
return AsyncResult.Complete()
def TryGet():
self._Get()
return True
# We don't want to link _Get directly as it'll hold a reference
# to the sink returned forever.
return AsyncResult.Run(TryGet)
def Close(self):
"""Decrease the reference count and, if zero, close the underlying transport."""
    self._ref_count -= 1
if self.next_sink and self._ref_count <= 0:
sink, self.next_sink = self.next_sink, None
sink.on_faulted.Unsubscribe(self.__PropagateShutdown)
sink.Close()
@property
def state(self):
if self.next_sink:
return self.next_sink.state
else:
return ChannelState.Idle
def __PropagateShutdown(self, value):
self.on_faulted.Set(value)
def _Get(self):
if not self.next_sink:
self.next_sink = self._sink_provider.CreateSink(self._properties)
self.next_sink.on_faulted.Subscribe(self.__PropagateShutdown)
self.next_sink.Open().wait()
return self.next_sink
elif self.next_sink.state == ChannelState.Idle:
self.next_sink.Open().wait()
return self.next_sink
elif self.next_sink.is_closed:
self.next_sink.on_faulted.Unsubscribe(self.__PropagateShutdown)
self.next_sink = None
return self._Get()
else:
return self.next_sink
def _Release(self, sink):
pass
SingletonPoolSink.Builder = SinkProvider(SingletonPoolSink, SinkRole.Pool)
```
#### File: scales/thrift/builder.py
```python
from ..resurrector import ResurrectorSink
from ..loadbalancer import ApertureBalancerSink
from ..pool import WatermarkPoolSink
from .sink import (
SocketTransportSink,
ThriftSerializerSink,
)
from ..core import Scales
class Thrift(object):
"""A builder for Thrift clients."""
@staticmethod
def NewBuilder(Iface):
return Scales.NewBuilder(Iface)\
.WithSink(ThriftSerializerSink.Builder())\
.WithSink(ApertureBalancerSink.Builder())\
.WithSink(ResurrectorSink.Builder())\
.WithSink(WatermarkPoolSink.Builder())\
.WithSink(SocketTransportSink.Builder())
@staticmethod
def NewClient(Iface, uri, timeout=60):
return Thrift.NewBuilder(Iface)\
.SetUri(uri)\
.SetTimeout(timeout)\
.Build()
```
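A hedged usage sketch of the builder above; `MyService` stands in for a hypothetical Thrift-generated module and the URI is a placeholder:

```python
# Hypothetical usage; MyService and the URI are placeholders, not part of scales.
from myservice import MyService  # assumed Thrift-generated service module

client = Thrift.NewClient(MyService.Iface, 'tcp://thrift-host:9090', timeout=30)

# Equivalent two-step form using the builder chain directly:
builder = Thrift.NewBuilder(MyService.Iface)
client = builder.SetUri('tcp://thrift-host:9090').SetTimeout(30).Build()
```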
#### File: scales/kafka/test_protocol.py
```python
from cStringIO import StringIO
import unittest
from scales.constants import MessageProperties
from scales.message import MethodCallMessage
from scales.kafka.protocol import (
BrokerMetadata,
KafkaProtocol,
MessageType,
MetadataResponse,
PartitionMetadata,
ProduceResponse,
)
from scales.kafka.sink import KafkaEndpoint
class KafkaProtocolTestCase(unittest.TestCase):
def testPutSerialization(self):
expected = 'AAEAAAPoAAAAAQAKdGVzdF90b3BpYwAAAAEAAAABAAAAJgAAAAAAAAAAAAAAGr0KwrwAAP////8AAAAMbWVzc2FnZV9kYXRh'.decode('base64')
s = KafkaProtocol()
mcm = MethodCallMessage(None, 'Put', ('test_topic', ['message_data']), {})
mcm.properties[MessageProperties.Endpoint] = KafkaEndpoint('host', 0, 1)
buf = StringIO()
s.SerializeMessage(mcm, buf, {})
self.assertEqual(buf.getvalue(), expected)
def testPutResponseDeserialization(self):
expected = 'AAAAAgAAAAEABmxvZ2hvZwAAAAEAAAAAAAAAAAAAAA5Xsw=='.decode('base64')
s = KafkaProtocol()
ret = s.DeserializeMessage(StringIO(expected), MessageType.ProduceRequest)
expected = [
ProduceResponse('loghog', 0, 0, 939955)
]
self.assertEqual(ret.return_value, expected)
def testBrokerInfoDeserialization(self):
raw_data = '<KEY>ABmxvZ2hvZwAAAAEACQAAAAAAAAABAAAAAgAAAAEAAAAAAAAAAgAAAAAAAAAB'.decode('base64')
s = KafkaProtocol()
ret = s.DeserializeMessage(StringIO(raw_data), MessageType.MetadataRequest)
expected = MetadataResponse(
brokers = {
0: BrokerMetadata(0, 'ec2-54-159-110-192.compute-1.amazonaws.com', 15063),
1: BrokerMetadata(1, 'ec2-54-81-106-88.compute-1.amazonaws.com', 15939)
},
topics = {
'loghog': {
0: PartitionMetadata('loghog', 0, 1, (1, 0), (0, 1))
}
}
)
self.assertEqual(ret.return_value, expected)
if __name__ == '__main__':
unittest.main()
```
#### File: scales/pool/test_singleton.py
```python
import unittest
from scales.constants import SinkProperties
from scales.pool.singleton import SingletonPoolSink
from scales.loadbalancer.zookeeper import Endpoint
from test.scales.util.base import SinkTestCase
class SingletonPoolTestCase(SinkTestCase):
SINK_CLS = SingletonPoolSink
def customize(self):
self.global_properties[SinkProperties.Endpoint] = Endpoint('localhost', 1234)
  def testSingletonPoolForwardsMessage(self):
self._submitTestMessage()
self._completeTestMessage()
self.assertEqual(self.return_message, self.MSG_SENTINEL)
def testSingletonPoolReusesSink(self):
self._submitTestMessage()
self._completeTestMessage()
self.assertEqual(self.return_message, self.MSG_SENTINEL)
self._prepareSinkStack()
self._submitTestMessage()
self._completeTestMessage()
self.assertEqual(self.return_message, self.MSG_SENTINEL)
self.assertEqual(len(self.mock_provider.sinks_created), 1)
def testSingletonPoolRecreatesFailedSink(self):
self.mock_provider.sinks_created[0].Fault()
self._submitTestMessage()
self._completeTestMessage()
self.assertEqual(self.return_message, self.MSG_SENTINEL)
self.assertEqual(len(self.mock_provider.sinks_created), 2)
if __name__ == '__main__':
unittest.main()
```
#### File: test/scales/test_varz.py
```python
import itertools
import random
import unittest
from scales.varz import (
_SampleSet,
Source,
VarzAggregator,
VarzReceiver,
VarzType,
)
def _round(arr, n):
return [round(i, n) for i in arr]
class VarzTestCase(unittest.TestCase):
def _getSampleData(self):
test_varz = {
'metric1': {
        Source(None, 'service1', 'endpoint1', None): 1,
        Source(None, 'service1', 'endpoint2', None): 2
},
'metric2': {
Source(None, 'service1', 'endpoint1', None): 1,
Source(None, 'service2', 'endpoint1', None): 2
},
'metric3': {
Source('method1', 'service1', None, None): 1,
Source('method2', 'service1', None, None): 2,
},
'metric4': {
Source(None, "service1", None, "client1"): 1,
Source(None, "service1", None, "client2"): 2,
Source(None, "service2", None, "client1"): 3,
},
}
return test_varz
def _getSampleRateData(self):
test_varz = {
'metric1': {
        Source(None, 'service1', 'endpoint1', None): _SampleSet(2, [1,2]),
        Source(None, 'service1', 'endpoint2', None): _SampleSet(2, [2,3])
},
'metric2': {
Source(None, 'service1', 'endpoint1', None): _SampleSet(2, [1,2]),
Source(None, 'service2', 'endpoint1', None): _SampleSet(2, [2,3])
},
'metric3': {
Source('method1', 'service1', None, None): _SampleSet(2, [1,2]),
Source('method2', 'service1', None, None): _SampleSet(2, [2,3]),
},
'metric4': {
Source(None, 'service1', None, 'client1'): _SampleSet(2, [1,2]),
Source(None, 'service1', None, 'client2'): _SampleSet(2, [2,3]),
Source(None, 'service2', None, 'client1'): _SampleSet(2, [4,5])
},
}
return test_varz
def testVarzAggregatorBasic(self):
test_varz = self._getSampleData()
metrics = {
'metric1': VarzType.Counter,
'metric2': VarzType.Counter,
'metric3': VarzType.Counter,
'metric4': VarzType.Counter,
}
aggs = VarzAggregator.Aggregate(test_varz, metrics)
# Endpoints are aggregated to a service
self.assertEqual(aggs['metric1'][('service1', None)].total, 3.0)
# Services are kept separate
self.assertEqual(aggs['metric2'][('service1', None)].total, 1.0)
self.assertEqual(aggs['metric2'][('service2', None)].total, 2.0)
    # Methods are aggregated to a service
self.assertEqual(aggs['metric3'][('service1', None)].total, 3.0)
    # Methods are aggregated to a client + service
self.assertEqual(aggs['metric4'][('service1', 'client1')].total, 1.0)
self.assertEqual(aggs['metric4'][('service1', 'client2')].total, 2.0)
self.assertEqual(aggs['metric4'][('service2', 'client1')].total, 3.0)
def testVarzAggregatorAverageRate(self):
test_varz = self._getSampleRateData()
metrics = {
'metric1': VarzType.AverageRate,
'metric2': VarzType.AverageRate,
'metric3': VarzType.AverageRate,
'metric4': VarzType.AverageRate,
}
random.seed(1)
aggs = VarzAggregator.Aggregate(test_varz, metrics)
self.assertEqual(_round(aggs['metric1'][('service1', None)].total, 2), [2.0, 2.0, 2.70, 2.97, 3.0, 3.0])
self.assertEqual(_round(aggs['metric2'][('service1', None)].total, 2), [1.5, 1.5, 1.90, 1.99, 2.0, 2.0])
self.assertEqual(_round(aggs['metric2'][('service2', None)].total, 2), [2.5, 2.5, 2.90, 2.99, 3.0, 3.0])
self.assertEqual(_round(aggs['metric3'][('service1', None)].total, 2), [2.0, 2.0, 2.70, 2.97, 3.0, 3.0])
self.assertEqual(_round(aggs['metric4'][('service1', 'client1')].total, 2), [1.5, 1.5, 1.9, 1.99, 2.0, 2.0])
self.assertEqual(_round(aggs['metric4'][('service1', 'client2')].total, 2), [2.5, 2.5, 2.9, 2.99, 3.0, 3.0])
self.assertEqual(_round(aggs['metric4'][('service2', 'client1')].total, 2), [4.5, 4.5, 4.9, 4.99, 5.0, 5.0])
def testShortStreamingPercentile(self):
source = Source(None, 'test', None, None)
metric = 'test'
VarzReceiver.VARZ_METRICS.clear()
VarzReceiver.VARZ_DATA.clear()
VarzReceiver.RegisterMetric(metric, VarzType.AverageTimer)
VarzReceiver.RecordPercentileSample(source, metric, 1)
VarzReceiver.RecordPercentileSample(source, metric, 2)
VarzReceiver.RecordPercentileSample(source, metric, 3)
VarzReceiver.RecordPercentileSample(source, metric, 2)
aggs = VarzAggregator.Aggregate(VarzReceiver.VARZ_DATA, VarzReceiver.VARZ_METRICS)
self.assertEqual(
_round(aggs[metric][('test', None)].total, 2),
[2.0, 2.0, 2.70, 2.97, 3.0, 3.0])
def testLongStreamingPercentile(self):
source = Source(None, 'test', None, None)
metric = 'test'
VarzReceiver.VARZ_METRICS.clear()
VarzReceiver.VARZ_DATA.clear()
VarzReceiver.RegisterMetric(metric, VarzType.AverageTimer)
random.seed(1)
for n in xrange(10000):
VarzReceiver.RecordPercentileSample(source, metric, float(random.randint(0, 100)))
aggs = VarzAggregator.Aggregate(VarzReceiver.VARZ_DATA, VarzReceiver.VARZ_METRICS)
self.assertEqual(
_round(aggs[metric][('test', None)].total, 2),
[50.25, 50, 92.0, 100.0, 100.0, 100.0])
if __name__ == '__main__':
unittest.main()
```
#### File: scales/util/base.py
```python
import unittest
from scales.constants import SinkProperties
from scales.message import Message
from test.scales.util.mocks import (MockSinkProvider, MockSink, MockSinkStack)
class SinkTestCase(unittest.TestCase):
REQ_MSG_SENTINEL = Message()
MSG_SENTINEL = object()
STREAM_SENTINEL = object()
SINK_CLS = None
_open_delay = 0
def _prepareSinkStack(self):
self.sink_stack = MockSinkStack()
def save_message(sink_stack, context, stream, msg):
self.return_message = msg
self.return_stream = stream
terminator_sink = MockSink({ SinkProperties.Endpoint: None })
terminator_sink.ProcessResponse = save_message
self.sink_stack.Push(terminator_sink)
def _submitTestMessage(self):
self.sink.AsyncProcessRequest(self.sink_stack, self.REQ_MSG_SENTINEL, None, None)
def _completeTestMessage(self):
self.sink_stack.AsyncProcessResponse(self.STREAM_SENTINEL, self.MSG_SENTINEL)
def customize(self, **kwargs):
pass
def _waitForSink(self):
pass
def _createSink(self):
return self.SINK_CLS(self.mock_provider, self.sink_properties, self.global_properties)
def setUp(self, **kwargs):
self.return_message = None
self.return_stream = None
self.sink_properties = None
self.global_properties = {
SinkProperties.Label: 'mock'
}
self.customize(**kwargs)
self.mock_provider = MockSinkProvider()
self.sink = self._createSink()
self.sink.Open().wait()
self._prepareSinkStack()
self._waitForSink()
```
|
{
"source": "JeffreyThiessen/refseq_masher",
"score": 2
}
|
#### File: refseq_masher/mash/dist.py
```python
import logging
import os
from typing import Optional, List
import pandas as pd
from .sketch import sketch_fasta, sketch_fastqs
from .parser import mash_dist_output_to_dataframe
from ..utils import run_command
from ..const import MASH_REFSEQ_MSH
def mash_dist_refseq(sketch_path: str, mash_bin: str = "mash") -> str:
"""Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
        sketch_path (str): Mash sketch file path or genome fasta file path
        mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string
"""
assert os.path.exists(sketch_path)
cmd_list = [mash_bin,
'dist',
MASH_REFSEQ_MSH,
sketch_path]
exit_code, stdout, stderr = run_command(cmd_list)
if exit_code != 0:
raise Exception(
'Could not run Mash dist. EXITCODE="{}" STDERR="{}" STDOUT="{}"'.format(exit_code, stderr, stdout))
return stdout
def fasta_vs_refseq(fasta_path: str,
mash_bin: str = "mash",
sample_name: Optional[str] = None,
tmp_dir: str = "/tmp",
k: int = 16,
s: int = 400) -> pd.DataFrame:
"""Compute Mash distances between input FASTA against all RefSeq genomes
Args:
fasta_path: FASTA file path
mash_bin: Mash binary path
sample_name: Sample name
tmp_dir: Temporary working directory
k: Mash kmer size
s: Mash number of min-hashes
Returns:
(pd.DataFrame): Mash genomic distance results ordered by ascending distance
"""
sketch_path = None
try:
sketch_path = sketch_fasta(fasta_path,
mash_bin=mash_bin,
tmp_dir=tmp_dir,
sample_name=sample_name,
k=k,
s=s)
mashout = mash_dist_refseq(sketch_path, mash_bin=mash_bin)
logging.info('Ran Mash dist successfully (output length=%s). Parsing Mash dist output', len(mashout))
df_mash = mash_dist_output_to_dataframe(mashout)
df_mash['sample'] = sample_name
logging.info('Parsed Mash dist output into Pandas DataFrame with %s rows', df_mash.shape[0])
logging.debug('df_mash: %s', df_mash.head(5))
return df_mash
finally:
if sketch_path and os.path.exists(sketch_path):
logging.info('Deleting temporary sketch file "%s"', sketch_path)
os.remove(sketch_path)
logging.info('Sketch file "%s" deleted!', sketch_path)
def fastq_vs_refseq(fastqs: List[str],
mash_bin: str = 'mash',
                    sample_name: Optional[str] = None,
tmp_dir: str = '/tmp',
k: int = 16,
s: int = 400,
m: int = 8) -> pd.DataFrame:
"""Compute Mash distances between input reads against all RefSeq genomes
Args:
fastqs: FASTQ paths
mash_bin: Mash binary path
sample_name: Sample name
tmp_dir: Temporary working directory
k: Mash kmer size
s: Mash number of min-hashes
m: Mash number of times a k-mer needs to be observed in order to be considered for Mash sketch DB
Returns:
(pd.DataFrame): Mash genomic distance results ordered by ascending distance
"""
assert len(fastqs) > 0, "Must supply one or more FASTQ paths"
sketch_path = None
try:
sketch_path = sketch_fastqs(fastqs,
mash_bin=mash_bin,
tmp_dir=tmp_dir,
sample_name=sample_name,
k=k,
s=s,
m=m)
logging.info('Mash sketch database created for "%s" at "%s"', fastqs, sketch_path)
logging.info('Querying Mash sketches "%s" against RefSeq sketch database', sketch_path)
mashout = mash_dist_refseq(sketch_path, mash_bin=mash_bin)
logging.info('Queried "%s" against RefSeq sketch database. Parsing into Pandas DataFrame', sketch_path)
df_mash = mash_dist_output_to_dataframe(mashout)
df_mash['sample'] = sample_name
logging.info('Parsed Mash distance results into DataFrame with %s entries', df_mash.shape[0])
logging.debug('df_mash %s', df_mash.head(5))
return df_mash
finally:
if sketch_path and os.path.exists(sketch_path):
logging.info('Deleting temporary sketch file "%s"', sketch_path)
os.remove(sketch_path)
logging.info('Sketch file "%s" deleted!', sketch_path)
```
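A minimal usage sketch; the FASTA path and sample name below are hypothetical, and a working `mash` binary plus the bundled RefSeq sketch (`MASH_REFSEQ_MSH`) are assumed to be available:

```python
# Hypothetical paths; assumes `mash` is on $PATH and MASH_REFSEQ_MSH exists.
df = fasta_vs_refseq('/data/genome_1.fasta',
                     mash_bin='mash',
                     sample_name='genome_1',
                     tmp_dir='/tmp',
                     k=16,
                     s=400)
print(df[['sample', 'match_id', 'distance']].head())  # closest RefSeq genomes first
```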
#### File: refseq_masher/mash/parser.py
```python
import logging
from io import StringIO
from typing import Optional
import pandas as pd
#: Sometimes Mash dist outputs 4 columns; other times it outputs 5 columns
MASH_DIST_4_COLUMNS = """
match_id
distance
pvalue
matching
""".strip().split('\n')
MASH_DIST_5_COLUMNS = """
match_id
query_id
distance
pvalue
matching
""".strip().split('\n')
#: Mash screen output columns
MASH_SCREEN_COLUMNS = """
identity
shared_hashes
median_multiplicity
pvalue
match_id
match_comment
""".strip().split('\n')
def _no_periods(s: str) -> Optional[str]:
return s if s != '.' else None
def parse_refseq_info(match_id: str) -> dict:
"""Parse a RefSeq Mash match_id
For example from the following `match_id`:
./rcn/refseq-NZ-1147754-PRJNA224116-.-GCF_000313715.1-.-Salmonella_enterica_subsp._enterica_serovar_Enteritidis_str._LA5.fna
    If you split on '-' and ignore the first two elements, you can extract, in order, the NCBI:
- Taxonomy UID = 1147754
- BioProject accession = PRJNA224116
- BioSample accession = None
- Genome accession = GCF_000313715.1
- plasmid name = None
- FNA filename (Salmonella_enterica_subsp._enterica_serovar_Enteritidis_str._LA5.fna)
If "Salmonella" is found in the FNA filename, then serovar and subspecies will be parsed if present.
For the example above, the subspecies would be "enterica" and the serovar would be "Enteritidis".
Values with periods ('.') will be treated as None (null).
Args:
match_id (str): Mash RefSeq match_id with taxid, bioproject, full strain name, etc delimited by '-'
Returns:
(dict): parsed NCBI accession and other info
"""
logging.debug('Parsing RefSeq info from "%s"', match_id)
sp = match_id.split('-')
_, prefix, taxid_str, bioproject, biosample, assembly_acc, plasmid, fullname = sp
taxid = int(taxid_str)
fullname = fullname.replace('.fna', '')
serovar = None
subsp = None
if 'Salmonella' in fullname:
if '_serovar_' in fullname:
serovar = fullname.split('_serovar_')[-1].split('_str.')[0]
if '_subsp._' in fullname:
subsp = fullname.split('_subsp._')[-1].split('_')[0]
return dict(match_id=match_id,
taxid=taxid,
biosample=_no_periods(biosample),
bioproject=_no_periods(bioproject),
assembly_accession=_no_periods(assembly_acc),
plasmid=_no_periods(plasmid),
serovar=serovar,
subspecies=subsp)
def mash_dist_output_to_dataframe(mash_out: str) -> pd.DataFrame:
"""Mash dist stdout to Pandas DataFrame
Args:
mash_out (str): Mash dist stdout
Returns:
(pd.DataFrame): Mash dist table ordered by ascending distance
"""
df = pd.read_table(StringIO(mash_out))
ncols = df.shape[1]
if ncols == 5:
df.columns = MASH_DIST_5_COLUMNS
df = df[MASH_DIST_4_COLUMNS]
if ncols == 4:
df.columns = MASH_DIST_4_COLUMNS
df.sort_values(by='distance', ascending=True, inplace=True)
match_ids = df.match_id
dfmatch = pd.DataFrame([parse_refseq_info(match_id=match_id) for match_id in match_ids])
return pd.merge(dfmatch, df, on='match_id')
def mash_screen_output_to_dataframe(mash_out: str) -> Optional[pd.DataFrame]:
"""Mash screen stdout to Pandas DataFrame
Args:
mash_out: Mash screen stdout
Returns:
        (Optional[pd.DataFrame]): Mash screen output table ordered by `identity` and `median_multiplicity` columns in
        descending order, or None if the Mash output is empty
"""
dfmerge = None
if len(mash_out) > 0:
df = pd.read_table(StringIO(mash_out))
ncols = df.shape[1]
df.columns = MASH_SCREEN_COLUMNS[:ncols]
df.sort_values(by=['identity', 'median_multiplicity'], ascending=[False, False], inplace=True)
match_ids = df.match_id
refseq_matches = [parse_refseq_info(match_id=match_id) for match_id in match_ids]
dfmatch = pd.DataFrame(refseq_matches)
dfmerge = pd.merge(dfmatch, df, on='match_id')
return dfmerge
```
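A worked example of `parse_refseq_info` on the match_id quoted in its docstring; the expected values follow directly from the splitting logic above:

```python
# Worked example using the match_id from the parse_refseq_info docstring.
match_id = ('./rcn/refseq-NZ-1147754-PRJNA224116-.-GCF_000313715.1-.-'
            'Salmonella_enterica_subsp._enterica_serovar_Enteritidis_str._LA5.fna')
info = parse_refseq_info(match_id)
assert info['taxid'] == 1147754
assert info['bioproject'] == 'PRJNA224116'
assert info['biosample'] is None              # '.' is mapped to None
assert info['assembly_accession'] == 'GCF_000313715.1'
assert info['plasmid'] is None
assert info['subspecies'] == 'enterica'
assert info['serovar'] == 'Enteritidis'
```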
#### File: refseq_masher/refseq_masher/utils.py
```python
import logging
import os
import re
from collections import defaultdict
from subprocess import Popen, PIPE
from typing import List, Tuple, Union, Optional, Any
import pandas as pd
from .const import REGEX_FASTA, REGEX_FASTQ
NT_SUB = {x: y for x, y in zip('acgtrymkswhbvdnxACGTRYMKSWHBVDNX', 'tgcayrkmswdvbhnxTGCAYRKMSWDVBHNX')}
def run_command(cmdlist: List[str], stdin: Optional[Any] = None, stderr: Optional[Any] = PIPE) -> Tuple[int, str, str]:
p = Popen(cmdlist,
stdout=PIPE,
stderr=stderr,
stdin=stdin)
stdout, stderr = p.communicate()
exit_code = p.returncode
if isinstance(stdout, bytes):
stdout = stdout.decode()
if isinstance(stderr, bytes):
stderr = stderr.decode()
return exit_code, stdout, stderr
def exc_exists(exc_name: str) -> bool:
"""Check if an executable exists
Args:
exc_name (str): Executable name or path (e.g. "blastn")
Returns:
        bool: Does the executable exist in the user's $PATH?
"""
cmd = ['which', exc_name]
exit_code, stdout, stderr = run_command(cmd)
if exit_code == 0:
return True
else:
logging.warning('which exited with non-zero code {} with command "{}"'.format(exit_code, ' '.join(cmd)))
logging.warning(stderr)
return False
def sample_name_from_fasta_path(fasta_path: str) -> str:
"""Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
            genome_name = sample_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name
"""
filename = os.path.basename(fasta_path)
filename = re.sub(r'\.gz$', '', filename)
return re.sub(r'\.(fa|fas|fasta|fna|\w{1,})(\.gz)?$', '', filename)
def sample_name_from_fastq_paths(fastqs: List[str]) -> str:
"""Get the sample name from FASTQ file paths
Group FASTQs based on base filename without file extensions or expected FASTQ file delimiter (e.g. `_1`/`_2`)
Args:
fastqs: FASTQ paths
Returns:
(str): sample name
"""
grouped_fastqs = group_fastqs(fastqs)
for fastq_paths, sample_name in grouped_fastqs:
return sample_name
def group_fastqs(fastqs: List[str]) -> List[Tuple[List[str], str]]:
"""Group FASTQs based on common base filename
For example, if you have 2 FASTQs:
- reads_1.fastq
- reads_2.fastq
The common name would be `reads` and the files would be grouped based on that common name.
Args:
fastqs: FASTQ file paths
Returns:
list of grouped FASTQs grouped by common base filename
"""
genome_fastqs = defaultdict(list)
for fastq in fastqs:
filename = os.path.basename(fastq)
basefilename = re.sub(r'_\d', '', REGEX_FASTQ.sub(r'\1', filename))
genome_fastqs[basefilename].append(fastq)
return [(fastq_paths, sample_name) for sample_name, fastq_paths in genome_fastqs.items()]
def collect_fasta_from_dir(input_directory: str) -> List[Tuple[str, str]]:
fastas = []
for x in os.listdir(input_directory):
full_file_path = os.path.abspath(os.path.join(input_directory, x))
if os.path.isfile(full_file_path) and REGEX_FASTA.match(x):
sample_name = sample_name_from_fasta_path(full_file_path)
fastas.append((full_file_path, sample_name))
return fastas
def collect_fastq_from_dir(input_directory):
fastqs = []
for x in os.listdir(input_directory):
full_file_path = os.path.abspath(os.path.join(input_directory, x))
if os.path.isfile(full_file_path) and REGEX_FASTQ.match(x):
fastqs.append(full_file_path)
if len(fastqs) > 0:
logging.info('Found %s FASTQ files in %s',
len(fastqs),
input_directory)
reads_from_dir = group_fastqs(fastqs)
logging.info('Collected %s read sets from %s FASTQ files in %s',
len(reads_from_dir),
len(fastqs),
input_directory)
return reads_from_dir
return []
def collect_inputs(inputs: List[str]) -> Tuple[List[Tuple[str, str]], List[Tuple[List[str], str]]]:
"""Collect all input files for analysis
Sample names are derived from the base filename with no extensions.
    Sequencing reads are paired if they share a common filename without "_\d".
Filepaths for contigs and reads files are collected from an input directory if provided.
Args:
inputs: paths to FASTA/FASTQ files
Returns:
List of (contig filename, sample name)
List of ([reads filepaths], sample name)
"""
contigs = []
reads = []
fastas = [x for x in inputs if REGEX_FASTA.match(x)]
fastqs = [x for x in inputs if REGEX_FASTQ.match(x)]
dirs = [x for x in inputs if os.path.isdir(x)]
if len(fastas) > 0:
for fasta_path in fastas:
fasta_path = os.path.abspath(fasta_path)
if os.path.exists(fasta_path):
genome_name = sample_name_from_fasta_path(fasta_path)
contigs.append((fasta_path, genome_name))
else:
logging.error('Input fasta "%s" does not exist!', fasta_path)
if len(fastqs) > 0:
grouped_fastqs = group_fastqs(fastqs)
logging.info('Grouped %s fastqs into %s groups',
len(fastqs),
len(grouped_fastqs))
reads += grouped_fastqs
for d in dirs:
fasta_from_dir = collect_fasta_from_dir(d)
if len(fasta_from_dir) > 0:
logging.info('Collected %s FASTA from dir "%s"', len(fasta_from_dir), d)
contigs = contigs + fasta_from_dir
fastq_from_dir = collect_fastq_from_dir(d)
if len(fastq_from_dir) > 0:
logging.info('Collected %s FASTQ from dir "%s"', len(fastq_from_dir), d)
reads += fastq_from_dir
logging.info('Collected %s FASTA inputs and %s read sets', len(contigs), len(reads))
return contigs, reads
LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
def init_console_logger(logging_verbosity=3):
logging_levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
if logging_verbosity > (len(logging_levels) - 1):
logging_verbosity = 3
lvl = logging_levels[logging_verbosity]
logging.basicConfig(format=LOG_FORMAT, level=lvl)
return lvl
def order_output_columns(dfout: pd.DataFrame, cols: List[str]) -> pd.DataFrame:
set_columns = set(dfout.columns)
present_columns = [x for x in cols if x in set_columns]
rest_columns = list(set_columns - set(present_columns))
return dfout[present_columns + rest_columns]
```
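A quick illustration of the FASTQ grouping helpers; the file names are hypothetical and the expected output assumes `REGEX_FASTQ` strips a `.fastq`/`.fq(.gz)` extension:

```python
# Hypothetical read files; output assumes REGEX_FASTQ strips the FASTQ extension.
fastqs = ['/reads/sampleA_1.fastq', '/reads/sampleA_2.fastq']
print(group_fastqs(fastqs))
# => [(['/reads/sampleA_1.fastq', '/reads/sampleA_2.fastq'], 'sampleA')]
print(sample_name_from_fastq_paths(fastqs))
# => 'sampleA'
```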
|
{
"source": "JeffreyThiessen/staramr",
"score": 3
}
|
#### File: blast/pointfinder/PointfinderDatabaseInfo.py
```python
import logging
from os import path
import pandas as pd
"""
A Class storing information about the specific PointFinder database.
"""
logger = logging.getLogger('PointfinderDatabaseInfo')
class PointfinderDatabaseInfo:
def __init__(self, database_info_dataframe, file=None):
"""
Creates a new PointfinderDatabaseInfo.
:param database_info_dataframe: A pd.DataFrame containing the information in PointFinder.
:param file: The file where the pointfinder database info originates from.
"""
self._pointfinder_info = database_info_dataframe
self._file = file
self._resistance_table_hacks(self._pointfinder_info)
@classmethod
def from_file(cls, file):
"""
Builds a new PointfinderDatabaseInfo from the passed file containing PointFinder information on drug resistance
mutations.
:param file: The file containing drug resistance mutations.
:return: A new PointfinderDatabaseInfo.
"""
pointfinder_info = pd.read_csv(file, sep='\t', index_col=False)
return cls(pointfinder_info, file)
@classmethod
def from_pandas_table(cls, database_info_dataframe):
"""
Builds a new PointfinderDatabaseInfo from the passed pd.DataFrame.
:param database_info_dataframe: A pd.DataFrame containing the information in PointFinder.
:return: A new PointfinderDatabaseInfo.
"""
return cls(database_info_dataframe)
def _resistance_table_hacks(self, table):
"""
A function implementing some hacks to try and fix mismatched strings in the pointfinder databases.
These should be removed when the underlying database is corrected.
:param table: The pointfinder resistance table to fix.
:return: None, but modifies the passed table in place.
"""
if self._file and 'salmonella' in str(self._file) and path.exists(
path.join(path.dirname(self._file), '16S_rrsD.fsa')):
logger.debug("Replacing [16S] with [16S_rrsD] for pointfinder organism [salmonella]")
table[['#Gene_ID']] = table[['#Gene_ID']].replace('16S', '16S_rrsD')
def _get_resistance_codon_match(self, gene, codon_mutation):
table = self._pointfinder_info
matches = table[(table['#Gene_ID'] == gene)
& (table['Codon_pos'] == codon_mutation.get_mutation_position())
& (table['Ref_codon'] == codon_mutation.get_database_amr_gene_mutation())
& (table['Res_codon'].str.contains(codon_mutation.get_input_genome_mutation(), regex=False))]
if len(matches.index) > 1:
raise Exception("Error, multiple matches for gene=" + str(gene) + ", codon_mutation=" + str(codon_mutation))
else:
return matches
def _get_resistance_nucleotide_match(self, gene, nucleotide_mutations):
return self._get_resistance_codon_match(gene, nucleotide_mutations)
def get_phenotype(self, gene, codon_mutation):
"""
Gets the phenotype for a given gene and codon mutation from PointFinder.
:param gene: The gene.
:param codon_mutation: The codon mutation.
:return: A string describing the phenotype.
"""
match = self._get_resistance_codon_match(gene, codon_mutation)
if len(match.index) > 0:
return match['Resistance'].iloc[0]
else:
raise Exception("Error, no match for gene=" + str(gene) + ", codon_mutation=" + str(codon_mutation))
def get_resistance_codons(self, gene, codon_mutations):
"""
Gets a list of resistance codons from the given gene and codon mutations.
:param gene: The gene.
:param codon_mutations: The codon mutations.
:return: The resistance codons.
"""
resistance_mutations = []
for codon_mutation in codon_mutations:
match = self._get_resistance_codon_match(gene, codon_mutation)
if len(match.index) > 0:
resistance_mutations.append(codon_mutation)
return resistance_mutations
def get_resistance_nucleotides(self, gene, nucleotide_mutations):
"""
Gets a list of resistance nucleotides from the given gene and nucleotide mutations.
:param gene: The gene.
:param nucleotide_mutations: The nucleotide mutations.
:return: The resistance nucleotides.
"""
resistance_mutations = []
for nucleotide_mutation in nucleotide_mutations:
match = self._get_resistance_nucleotide_match(gene, nucleotide_mutation)
if len(match.index) > 0:
resistance_mutations.append(nucleotide_mutation)
return resistance_mutations
```
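A minimal sketch (not from the staramr test suite): building a `PointfinderDatabaseInfo` from an in-memory table and looking up a phenotype. `FakeCodonMutation` is a hypothetical stand-in for staramr's mutation objects, and the table values are illustrative rather than real PointFinder data:

```python
# Illustrative table and mutation object; values are made up for the example.
import pandas as pd

table = pd.DataFrame([{
    '#Gene_ID': 'gyrA',
    'Codon_pos': 67,
    'Ref_codon': 'GCC',
    'Res_codon': 'CCC,TCC',
    'Resistance': 'Ciprofloxacin',
}])
db_info = PointfinderDatabaseInfo.from_pandas_table(table)

class FakeCodonMutation:
    # Only the three accessors used by _get_resistance_codon_match are provided.
    def get_mutation_position(self): return 67
    def get_database_amr_gene_mutation(self): return 'GCC'
    def get_input_genome_mutation(self): return 'CCC'

print(db_info.get_phenotype('gyrA', FakeCodonMutation()))  # => 'Ciprofloxacin'
```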
#### File: blast/resfinder/ResfinderBlastDatabase.py
```python
import logging
from staramr.blast.AbstractBlastDatabase import AbstractBlastDatabase
logger = logging.getLogger('ResfinderBlastDatabase')
"""
A Class for pulling information from the ResFinder database.
"""
class ResfinderBlastDatabase(AbstractBlastDatabase):
def __init__(self, database_dir):
"""
Creates a new ResfinderBlastDatabase.
:param database_dir: The specific ResFinder database (drug class) directory.
"""
super().__init__(database_dir)
def get_name(self):
return 'resfinder'
```
#### File: blast/results/AMRHitHSP.py
```python
import abc
import os
import re
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from staramr.exceptions.InvalidPositionException import InvalidPositionException
"""
Class used to store/parse AMR BLAST hits/hsps.
"""
class AMRHitHSP:
def __init__(self, file, blast_record):
"""
Creates a new AMRHitHSP.
:param file: The particular file this BLAST hit came from.
:param blast_record: The Bio.Blast.Record this hit came from.
"""
__metaclass__ = abc.ABCMeta
self._file = file
if blast_record is not None:
self._blast_record = blast_record
if self.get_genome_contig_start() > self.get_genome_contig_end() and self.get_genome_contig_strand() != 'minus':
raise InvalidPositionException(
"contig start = {} > contig end = {} and strand is {}".format(self.get_genome_contig_start(),
self.get_genome_contig_end(),
self.get_genome_contig_strand()))
elif self.get_amr_gene_start() > self.get_amr_gene_end():
raise InvalidPositionException(
"amr gene start = {} > amr gene end = {}".format(self.get_amr_gene_start(),
self.get_amr_gene_end()))
def get_amr_gene_length(self):
"""
Gets the amr gene length.
:return: The amr gene length.
"""
return self._blast_record['qlen']
def get_hsp_length(self):
"""
Gets the BLAST HSP length.
:return: The BLAST HSP length.
"""
return self._blast_record['length']
def get_pid(self):
"""
Gets the percent identity of the HSP.
:return: The HSP percent identity.
"""
return self._blast_record['pident']
def get_plength(self):
"""
Gets the percent length of the HSP to the AMR gene.
:return: The percent length of the HSP to the AMR gene.
"""
return self._blast_record['plength']
def get_amr_gene_id(self):
"""
Gets the hit id.
:return: The hit id.
"""
return self._blast_record['qseqid']
@abc.abstractmethod
def get_amr_gene_name(self):
"""
Gets the gene name for the amr gene.
:return: The gene name.
"""
pass
def get_file(self):
"""
Gets the corresponding input file.
:return: The corresponding input file.
"""
return self._file
def get_genome_id(self):
"""
Gets genome id for the file.
:return: The genome id for the file
"""
return os.path.splitext(self._file)[0]
def get_genome_contig_id(self):
"""
Gets the particular id from the genome input file.
:return: The contig id.
"""
re_search = re.search(r'^(\S+)', self._blast_record['sseqid'])
return re_search.group(1)
def get_genome_contig_start(self) -> int:
"""
Gets the start of the HSP in the genome input file.
:return: The start of the HSP.
"""
return self._blast_record['sstart']
def get_genome_contig_end(self) -> int:
"""
Gets the end of the HSP in the genome input file.
:return: The end of the HSP.
"""
return self._blast_record['send']
def get_amr_gene_start(self):
"""
Gets the start of the hsp to the resistance gene.
:return: The start of the resistance gene hsp.
"""
return self._blast_record['qstart']
def get_amr_gene_end(self):
"""
Gets the end of the hsp to the resistance gene.
:return: The end of the resistance gene hsp.
"""
return self._blast_record['qend']
def get_amr_gene_seq(self):
"""
Gets the amr gene from the HSP.
:return: The amr gene (as a string) from the HSP.
"""
return self._blast_record['qseq']
def get_genome_contig_hsp_seq(self):
"""
Gets the genome sequence from the HSP.
:return: The genome sequence (as a string) from the HSP.
"""
return self._blast_record['sseq']
def get_genome_seq_in_amr_gene_strand(self):
"""
Gets the query sequence from the HSP.
:return: The query sequence (as a string) from the HSP.
"""
return self.get_genome_contig_hsp_seq()
def get_genome_contig_strand(self):
"""
Gets the genome contig strand for the BLAST hit.
:return: The genome contig strand for the BLAST hit.
"""
return self._blast_record['sstrand']
def get_seq_record(self):
"""
Gets a SeqRecord for this hit.
:return: A SeqRecord for this hit.
"""
return SeqRecord(Seq(self.get_genome_contig_hsp_seq()), id=self.get_amr_gene_id(),
description=(
'isolate: {}, contig: {}, contig_start: {}, contig_end: {}, database_gene_start: {},'
' database_gene_end: {}, hsp/length: {}/{}, pid: {:0.2f}%, plength: {:0.2f}%').format(
self.get_genome_id(),
self.get_genome_contig_id(),
self.get_genome_contig_start(),
self.get_genome_contig_end(),
self.get_amr_gene_start(),
self.get_amr_gene_end(),
self.get_hsp_length(),
self.get_amr_gene_length(),
self.get_pid(),
self.get_plength()))
```
#### File: blast/results/BlastResultsParser.py
```python
import abc
import logging
import os
from typing import List
import Bio.SeqIO
import numpy as np
import pandas as pd
from staramr.blast.JobHandler import JobHandler
from staramr.blast.results.BlastHitPartitions import BlastHitPartitions
logger = logging.getLogger('BlastResultsParser')
"""
Class for parsing BLAST results.
"""
class BlastResultsParser:
INDEX = 'Isolate ID' # type: str
COLUMNS = [] # type: List[str]
SORT_COLUMNS = [] # type: List[str]
BLAST_SORT_COLUMNS = [x.strip() for x in '''
plength
pident
sstart
'''.strip().split('\n')] # type: List[str]
def __init__(self, file_blast_map, blast_database, pid_threshold, plength_threshold, report_all=False,
output_dir=None, genes_to_exclude=[]):
"""
Creates a new class for parsing BLAST results.
:param file_blast_map: A map/dictionary linking input files to BLAST results files.
:param blast_database: The particular staramr.blast.AbstractBlastDatabase to use.
:param pid_threshold: A percent identity threshold for BLAST results.
:param plength_threshold: A percent length threshold for results.
:param report_all: Whether or not to report all blast hits.
:param output_dir: The directory where output files are being written.
:param genes_to_exclude: A list of gene IDs to exclude from the results.
"""
__metaclass__ = abc.ABCMeta
self._file_blast_map = file_blast_map
self._blast_database = blast_database
self._pid_threshold = pid_threshold
self._plength_threshold = plength_threshold
self._report_all = report_all
self._output_dir = output_dir
self._genes_to_exclude = genes_to_exclude
def parse_results(self):
"""
Parses the BLAST files passed to this particular object.
:return: A pd.DataFrame containing the AMR matches from BLAST.
"""
results = []
for file in self._file_blast_map:
databases = self._file_blast_map[file]
hit_seq_records = []
for database_name, blast_out in sorted(databases.items()):
logger.debug(str(blast_out))
if (not os.path.exists(blast_out)):
raise Exception("Blast output [" + blast_out + "] does not exist")
self._handle_blast_hit(file, database_name, blast_out, results, hit_seq_records)
if self._output_dir:
out_file = self._get_out_file_name(file)
if hit_seq_records:
                    logger.debug("Writing hits to %s", out_file)
Bio.SeqIO.write(hit_seq_records, out_file, 'fasta')
else:
logger.debug("No hits found, skipping writing output file to %s", out_file)
else:
logger.debug("No output directory defined for blast hits, skipping writing file")
return pd.DataFrame(results, columns=self.COLUMNS).sort_values(by=self.SORT_COLUMNS).set_index(self.INDEX)
@abc.abstractmethod
def _get_out_file_name(self, in_file):
"""
Gets hits output file name given input file.
:param in_file: The input file name.
:return: The output file name.
"""
pass
def _handle_blast_hit(self, in_file, database_name, blast_file, results, hit_seq_records):
blast_table = pd.read_csv(blast_file, sep='\t', header=None, names=JobHandler.BLAST_COLUMNS,
index_col=False).astype(
dtype={'qseqid': np.unicode_, 'sseqid': np.unicode_})
partitions = BlastHitPartitions()
blast_table['plength'] = (blast_table.length / blast_table.qlen) * 100.0
blast_table = blast_table[
(blast_table.pident >= self._pid_threshold) & (blast_table.plength >= self._plength_threshold) &
~blast_table.qseqid.isin(self._genes_to_exclude)]
blast_table.sort_values(by=self.BLAST_SORT_COLUMNS, inplace=True)
for index, blast_record in blast_table.iterrows():
partitions.append(self._create_hit(in_file, database_name, blast_record))
for hits_non_overlapping in partitions.get_hits_nonoverlapping_regions():
for hit in self._select_hits_to_include(hits_non_overlapping):
blast_results = self._get_result_rows(hit, database_name)
if blast_results is not None:
logger.debug("record = %s", blast_results)
results.extend(blast_results)
hit_seq_records.append(hit.get_seq_record())
def _select_hits_to_include(self, hits):
hits_to_include = []
if len(hits) >= 1:
sorted_hits_pid_first = sorted(hits, key=lambda x: (
x.get_pid(), x.get_plength(), x.get_amr_gene_length(), x.get_amr_gene_id()), reverse=True)
sorted_hits_length_first = sorted(hits, key=lambda x: (
x.get_amr_gene_length(), x.get_pid(), x.get_plength(), x.get_amr_gene_id()), reverse=True)
if self._report_all:
hits_to_include = sorted_hits_pid_first
else:
first_hit_pid = sorted_hits_pid_first[0]
first_hit_length = sorted_hits_length_first[0]
if first_hit_pid == first_hit_length:
hits_to_include.append(first_hit_length)
# if the top length hit is significantly longer, and the pid is not too much below the top pid hit (nor percent overlap too much below top pid hit), use the longer hit
elif (first_hit_length.get_amr_gene_length() - first_hit_pid.get_amr_gene_length()) > 10 and (
first_hit_length.get_pid() - first_hit_pid.get_pid()) > -1 and (
first_hit_length.get_plength() - first_hit_pid.get_plength()) > -1:
hits_to_include.append(first_hit_length)
# otherwise, prefer the top pid hit, even if it's shorter than the longest hit
else:
hits_to_include.append(first_hit_pid)
return hits_to_include
@abc.abstractmethod
def _create_hit(self, file, database_name, blast_record):
pass
@abc.abstractmethod
def _get_result_rows(self, hit, database_name):
pass
```
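A small illustration (not from the staramr test suite) of the hit-selection heuristic in `_select_hits_to_include`: a hit more than 10 bases longer is preferred over the top percent-identity hit as long as its identity and overlap are no more than one percentage point lower. `FakeHit` is a hypothetical stand-in exposing only the accessors the method needs; the abstract markers on `BlastResultsParser` are not enforced in Python 3 (no ABCMeta metaclass), so it can be instantiated directly for this demonstration:

```python
# Hypothetical hits used only to exercise _select_hits_to_include.
class FakeHit:
    def __init__(self, gene_id, pid, plength, gene_length):
        self._id, self._pid, self._plength, self._len = gene_id, pid, plength, gene_length
    def get_amr_gene_id(self): return self._id
    def get_pid(self): return self._pid
    def get_plength(self): return self._plength
    def get_amr_gene_length(self): return self._len

parser = BlastResultsParser(file_blast_map={}, blast_database=None,
                            pid_threshold=98.0, plength_threshold=60.0)
short_high_pid = FakeHit('blaA', pid=99.0, plength=95.0, gene_length=1000)
long_close_pid = FakeHit('blaB', pid=98.5, plength=96.0, gene_length=1200)
chosen = parser._select_hits_to_include([short_high_pid, long_close_pid])
print([h.get_amr_gene_id() for h in chosen])  # => ['blaB'] (the longer hit wins)
```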
#### File: results/plasmidfinder/PlasmidfinderHitHSP.py
```python
import logging
import re
from typing import List
import pandas as pd
from staramr.blast.results.AMRHitHSP import AMRHitHSP
logger = logging.getLogger('PlasmidfinderHitHSP')
"""
A Class storing a PlasmidFinder-specific BLAST hit/HSP.
"""
class PlasmidfinderHitHSP(AMRHitHSP):
def __init__(self, file: str, blast_record: pd.Series) -> None:
"""
Builds a new PlasmidfinderHitHSP.
:param file: The input file.
        :param blast_record: The BLAST record (a pd.Series row) this hit came from.
"""
super().__init__(file, blast_record)
logger.debug("record=%s", self._blast_record)
splitList = re.split('_', self.get_amr_gene_id())
re_search = list(filter(None, splitList)) # type: List[str]
if not re_search:
raise Exception("Could not split up seq name for [" + self.get_amr_gene_id() + "]")
length = len(re_search)
# Add empty string if gene variant is missing
if length == 2:
re_search.insert(1, '')
length += 1
if 3 <= length <= 5:
self._gene = re_search[0]
self._gene_variant = re_search[1]
if length == 3:
self._accession = re_search[2]
elif length == 4:
self._accession = re_search[3]
elif length == 5:
self._accession = re_search[3] + "_" + re_search[4]
else:
raise Exception("Length not valid for variable assignment, for [" + self.get_amr_gene_id() + "]")
else:
raise Exception("Index length not within range, for [" + self.get_amr_gene_id() + "]")
def get_amr_gene_name(self) -> str:
"""
Gets the gene name for the PlasmidFinder hit.
:return: The gene name.
"""
return self._gene
def get_amr_gene_name_with_variant(self) -> str:
"""
Gets the gene name + variant number for the PlasmidFinder hit.
:return: The gene name + variant number.
"""
return self.get_amr_gene_name() + '_' + self._gene_variant
def get_amr_gene_variant_accession(self) -> str:
"""
Gets the gene name + variant number + accession for the PlasmidFinder hit.
:return: The gene name + variant number + accession.
"""
return self._gene + '_' + self._gene_variant + '_' + self._accession
def get_amr_gene_variant(self) -> str:
"""
Gets the variant number for the PlasmidFinder hit.
:return: The variant number.
"""
return self._gene_variant
def get_amr_gene_accession(self) -> str:
"""
Gets the accession for the PlasmidFinder hit.
:return: The accession.
"""
return self._accession
```
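Illustrative (hypothetical) PlasmidFinder sequence ids showing the shapes the constructor above handles; the branch taken depends on how many non-empty parts the underscore split produces:

```python
# Hypothetical sequence ids; the expected attributes follow the length-3..5 branches above.
import re

for seq_id in ('IncFIB_1_AP001918',           # 3 parts: accession = parts[2]
               'IncX_1_pOLA52_DQ826098',      # 4 parts: accession = parts[3]
               'IncFIB_1_pKPHS1_NC_016838'):  # 5 parts: accession = parts[3] + '_' + parts[4]
    parts = list(filter(None, re.split('_', seq_id)))
    print(seq_id, '->', parts)
```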
#### File: results/pointfinder/BlastResultsParserPointfinder.py
```python
import logging
from os import path
from staramr.blast.results.BlastResultsParser import BlastResultsParser
from staramr.blast.results.pointfinder.PointfinderHitHSP import PointfinderHitHSP
from staramr.blast.results.pointfinder.nucleotide.PointfinderHitHSPRNA import PointfinderHitHSPRNA
logger = logging.getLogger('BlastResultsParserPointfinder')
"""
A Class for parsing BLAST results specific to PointFinder.
"""
class BlastResultsParserPointfinder(BlastResultsParser):
COLUMNS = [x.strip() for x in '''
Isolate ID
Gene
Type
Position
Mutation
%Identity
%Overlap
HSP Length/Total Length
Contig
Start
End
'''.strip().split('\n')]
SORT_COLUMNS = ['Isolate ID', 'Gene']
def __init__(self, file_blast_map, blast_database, pid_threshold, plength_threshold, report_all=False,
output_dir=None, genes_to_exclude=[]):
"""
Creates a new BlastResultsParserPointfinder.
:param file_blast_map: A map/dictionary linking input files to BLAST results files.
:param blast_database: The particular staramr.blast.AbstractBlastDatabase to use.
:param pid_threshold: A percent identity threshold for BLAST results.
:param plength_threshold: A percent length threshold for results.
:param report_all: Whether or not to report all blast hits.
:param output_dir: The directory where output files are being written.
:param genes_to_exclude: A list of gene IDs to exclude from the results.
"""
super().__init__(file_blast_map, blast_database, pid_threshold, plength_threshold, report_all,
output_dir=output_dir, genes_to_exclude=genes_to_exclude)
def _create_hit(self, file, database_name, blast_record):
logger.debug("database_name=%s", database_name)
if (database_name == '16S_rrsD') or (database_name == '23S'):
return PointfinderHitHSPRNA(file, blast_record)
else:
return PointfinderHitHSP(file, blast_record)
def _get_result(self, hit, db_mutation):
return [hit.get_genome_id(),
hit.get_amr_gene_id() + " (" + db_mutation.get_mutation_string_short() + ")",
db_mutation.get_type(),
db_mutation.get_mutation_position(),
db_mutation.get_mutation_string(),
hit.get_pid(),
hit.get_plength(),
str(hit.get_hsp_length()) + "/" + str(hit.get_amr_gene_length()),
hit.get_genome_contig_id(),
hit.get_genome_contig_start(),
hit.get_genome_contig_end()
]
def _get_result_rows(self, hit, database_name):
database_mutations = hit.get_mutations()
gene = hit.get_amr_gene_name()
for x in database_mutations:
logger.debug("database_mutations: position=%s, mutation=%s", x.get_mutation_position(),
x.get_mutation_string())
if (database_name == '16S_rrsD') or (database_name == '23S'):
database_resistance_mutations = self._blast_database.get_resistance_nucleotides(gene, database_mutations)
else:
database_resistance_mutations = self._blast_database.get_resistance_codons(gene, database_mutations)
logger.debug("database_resistance_mutations=%s", database_resistance_mutations)
if len(database_resistance_mutations) == 0:
logger.debug("No mutations for id=[%s], file=[%s]", hit.get_amr_gene_id(), hit.get_file())
else:
results = []
for db_mutation in database_resistance_mutations:
logger.debug("multiple resistance mutations for [%s]: mutations=[%s], file=[%s]",
hit.get_amr_gene_id(), database_resistance_mutations, hit.get_file())
results.append(self._get_result(hit, db_mutation))
return results
return None
def _get_out_file_name(self, in_file):
if self._output_dir:
return path.join(self._output_dir, 'pointfinder_' + path.basename(in_file))
else:
raise Exception("output_dir is None")
```
#### File: staramr/databases/BlastDatabaseRepositories.py
```python
import logging
import shutil
from collections import OrderedDict
from typing import Dict
from staramr.blast.AbstractBlastDatabase import AbstractBlastDatabase
from staramr.blast.plasmidfinder.PlasmidfinderBlastDatabase import PlasmidfinderBlastDatabase
from staramr.blast.pointfinder.PointfinderBlastDatabase import PointfinderBlastDatabase
from staramr.blast.resfinder.ResfinderBlastDatabase import ResfinderBlastDatabase
from staramr.databases.BlastDatabaseRepository import BlastDatabaseRepository, BlastDatabaseRepositoryStripGitDir
logger = logging.getLogger('BlastDatabaseRepositories')
"""
A Class used to handle interactions with blast database repository files.
"""
class BlastDatabaseRepositories:
def __init__(self, database_dir: str, is_dist: bool = False) -> None:
"""
Creates a new AMRDatabaseHandler.
:param database_dir: The root directory for the databases.
:param is_dist: Whether or not we are building distributable versions of the blast database repositories
(that is, should we strip out the .git directories).
"""
self._database_dir = database_dir
self._database_repositories = {} # type: Dict[str,BlastDatabaseRepository]
self._is_dist = is_dist
def register_database_repository(self, database_name: str, git_repository_url: str) -> None:
"""
Registers a new database repository.
:param database_name: The name of the database.
:param git_repository_url: The git repository url.
:return: None
"""
database_repository = BlastDatabaseRepository(self._database_dir, database_name,
git_repository_url) # type: BlastDatabaseRepository
if self._is_dist:
database_repository = BlastDatabaseRepositoryStripGitDir(self._database_dir, database_name,
git_repository_url)
if database_name in self._database_repositories:
            raise Exception("A database with name [{}] already exists".format(database_name))
else:
self._database_repositories[database_name] = database_repository
def build(self, commits: Dict[str, str] = None):
"""
Downloads and builds new databases.
:param commits: A map of {'database_name' : 'commit'} defining the particular commits to build.
:return: None
"""
for database_name in self._database_repositories:
commit = commits.get(database_name) if commits else None
self._database_repositories[database_name].build(commit)
def update(self, commits: Dict[str, str] = None):
"""
Updates an existing database to the latest revisions (or passed specific revisions).
:param commits: A map of {'database_name' : 'commit'} defining the particular commits to update to.
:return: None
"""
for database_name in self._database_repositories:
commit = commits.get(database_name) if commits else None
self._database_repositories[database_name].update(commit)
def remove(self):
"""
Removes the databases stored in this directory.
:return: None
"""
for name, repo in self._database_repositories.items():
repo.remove()
shutil.rmtree(self._database_dir)
def info(self) -> Dict[str, str]:
"""
Gets information on the ResFinder/PointFinder databases.
:return: Database information as a OrderedDict of key/value pairs.
"""
info = OrderedDict() # type: Dict[str,str]
for name, repo in self._database_repositories.items():
info.update(repo.info())
return info
def get_database_dir(self) -> str:
"""
Gets the root database dir.
:return: The root database dir.
"""
return self._database_dir
def get_repo_dir(self, name: str) -> str:
"""
Gets database repo directory for the given database name.
:param name: The database name.
:return: The database dir for the given database name.
"""
return self._database_repositories[name].get_git_dir()
def is_at_commits(self, commits: Dict[str, str]):
"""
Are the database repositories at the passed commits?
:param commits: A dict of the commits {'database_name': 'commit'}.
:return: True if the database repositories are at the passed commits (ignores repos not passed in dict). False otherwise.
"""
for name, repo in self._database_repositories.items():
if name in commits and not repo.is_at_commit(commits[name]):
return False
return True
def is_dist(self):
"""
Whether or not we are building distributable versions of the blast database repositories (that is, should we strip out the .git directories).
:return: True if is_dist, False otherwise.
"""
return self._is_dist
@classmethod
def create_default_repositories(cls, root_database_dir: str, is_dist: bool = False):
"""
Class method for creating a BlastDatabaseRepositories object configured with the default repositories.
        :param root_database_dir: The root database directory.
:param is_dist: Whether or not we are building distributable versions of the blast database repositories
(that is, should we strip out the .git directories).
:return: The BlastDatabaseRepositories.
"""
repos = cls(root_database_dir, is_dist)
repos.register_database_repository('resfinder', 'https://bitbucket.org/genomicepidemiology/resfinder_db.git')
repos.register_database_repository('pointfinder',
'https://bitbucket.org/genomicepidemiology/pointfinder_db.git')
repos.register_database_repository('plasmidfinder',
'https://bitbucket.org/genomicepidemiology/plasmidfinder_db.git')
return repos
def build_blast_database(self, database_name: str, options: Dict[str, str] = {}) -> AbstractBlastDatabase:
"""
Builds a staramr.blast.AbstractBlastDatabase from the given parameters.
:param database_name: The name of the database to build.
:param options: Options for the particular database in the form of a map {'key': 'value'}
:return: A new staramr.blast.AbstractBlastDatabase.
"""
if database_name not in self._database_repositories:
            raise Exception("database_name={} not registered".format(database_name))
if database_name == 'resfinder':
return ResfinderBlastDatabase(self.get_repo_dir(database_name))
elif database_name == 'pointfinder':
return PointfinderBlastDatabase(self.get_repo_dir(database_name), options['organism'])
elif database_name == 'plasmidfinder':
if options:
return PlasmidfinderBlastDatabase(self.get_repo_dir(database_name), options['database_type'])
else:
return PlasmidfinderBlastDatabase(self.get_repo_dir(database_name))
else:
            raise Exception("Unknown database name [{}]".format(database_name))
```
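A usage sketch; the database directory below is hypothetical, and `build()` needs network access to clone the three default git repositories registered above:

```python
# Hypothetical root directory; build() clones the default database repositories.
repos = BlastDatabaseRepositories.create_default_repositories('/tmp/staramr-databases')
repos.build()
resfinder_db = repos.build_blast_database('resfinder')
pointfinder_db = repos.build_blast_database('pointfinder', options={'organism': 'salmonella'})
print(repos.info())  # aggregated information from each registered repository
```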
#### File: staramr/detection/AMRDetection.py
```python
import copy
import logging
import os
import pandas as pd
from os import path
import re
from collections import Counter
from typing import List, Dict, Optional
from Bio import SeqIO
from pandas import DataFrame
from staramr.blast.plasmidfinder.PlasmidfinderBlastDatabase import PlasmidfinderBlastDatabase
from staramr.blast.pointfinder.PointfinderBlastDatabase import PointfinderBlastDatabase
from staramr.blast.resfinder.ResfinderBlastDatabase import ResfinderBlastDatabase
from staramr.blast.results.BlastResultsParser import BlastResultsParser
from staramr.blast.results.plasmidfinder.BlastResultsParserPlasmidfinder import BlastResultsParserPlasmidfinder
from staramr.blast.results.pointfinder.BlastResultsParserPointfinder import BlastResultsParserPointfinder
from staramr.blast.results.resfinder.BlastResultsParserResfinder import BlastResultsParserResfinder
from staramr.results.AMRDetectionSummary import AMRDetectionSummary
from staramr.results.QualityModule import QualityModule
logger = logging.getLogger("AMRDetection")
"""
A Class to handle scanning files for AMR genes.
"""
class AMRDetection:
def __init__(self, resfinder_database: ResfinderBlastDatabase, amr_detection_handler,
pointfinder_database: PointfinderBlastDatabase = None,
include_negative_results: bool = False, output_dir: str = None, genes_to_exclude: list = [],
plasmidfinder_database: PlasmidfinderBlastDatabase = None) -> None:
"""
Builds a new AMRDetection object.
:param resfinder_database: The staramr.blast.resfinder.ResfinderBlastDatabase for the particular ResFinder database.
:param amr_detection_handler: The staramr.blast.JobHandler to use for scheduling BLAST jobs.
:param pointfinder_database: The staramr.blast.pointfinder.PointfinderBlastDatabase to use for the particular PointFinder database.
:param plasmidfinder_database: The staramr.blast.plasmidfinder.PlasmidfinderBlastDatabase for the particular PlasmidFinder database.
:param include_negative_results: If True, include files lacking AMR genes in the resulting summary table.
:param output_dir: The directory where output fasta files are to be written into (None for no output fasta files).
:param genes_to_exclude: A list of gene IDs to exclude from the results.
"""
self._resfinder_database = resfinder_database
self._amr_detection_handler = amr_detection_handler
self._pointfinder_database = pointfinder_database
self._plasmidfinder_database = plasmidfinder_database
self._include_negative_results = include_negative_results
if pointfinder_database is None:
self._has_pointfinder = False
else:
self._has_pointfinder = True
self._output_dir = output_dir
self._genes_to_exclude = genes_to_exclude
def _create_amr_summary(self, files: List[str], resfinder_dataframe: DataFrame,quality_module_dataframe: DataFrame,
pointfinder_dataframe: Optional[BlastResultsParserPointfinder],
plasmidfinder_dataframe: DataFrame, mlst_dataframe: DataFrame) -> DataFrame:
amr_detection_summary = AMRDetectionSummary(files, resfinder_dataframe,quality_module_dataframe,
pointfinder_dataframe, plasmidfinder_dataframe, mlst_dataframe)
return amr_detection_summary.create_summary(self._include_negative_results)
def _create_detailed_amr_summary(self, files: List[str], resfinder_dataframe: DataFrame,quality_module_dataframe: DataFrame,
pointfinder_dataframe: Optional[BlastResultsParserPointfinder],
plasmidfinder_dataframe: DataFrame, mlst_dataframe: DataFrame) -> DataFrame:
amr_detection_summary = AMRDetectionSummary(files, resfinder_dataframe,quality_module_dataframe,
pointfinder_dataframe, plasmidfinder_dataframe, mlst_dataframe)
return amr_detection_summary.create_detailed_summary(self._include_negative_results)
def _create_resfinder_dataframe(self, resfinder_blast_map: Dict, pid_threshold: float, plength_threshold: int,
report_all: bool) -> DataFrame:
resfinder_parser = BlastResultsParserResfinder(resfinder_blast_map, self._resfinder_database, pid_threshold,
plength_threshold, report_all, output_dir=self._output_dir,
genes_to_exclude=self._genes_to_exclude)
return resfinder_parser.parse_results()
def _create_pointfinder_dataframe(self, pointfinder_blast_map: Dict, pid_threshold: float, plength_threshold: int,
report_all: bool) -> DataFrame:
pointfinder_parser = BlastResultsParserPointfinder(pointfinder_blast_map, self._pointfinder_database,
pid_threshold, plength_threshold, report_all,
output_dir=self._output_dir,
genes_to_exclude=self._genes_to_exclude)
return pointfinder_parser.parse_results()
def _create_plasmidfinder_dataframe(self, plasmidfinder_blast_map: Dict[str, BlastResultsParser],
pid_threshold: float, plength_threshold: int,
report_all: bool) -> DataFrame:
plasmidfinder_parser = BlastResultsParserPlasmidfinder(plasmidfinder_blast_map, self._plasmidfinder_database,
pid_threshold,
plength_threshold, report_all,
output_dir=self._output_dir,
genes_to_exclude=self._genes_to_exclude)
return plasmidfinder_parser.parse_results()
    def create_quality_module_dataframe(self, files, genome_size_lower_bound, genome_size_upper_bound, minimum_N50_value,
                                        minimum_contig_length, unacceptable_num_contigs) -> DataFrame:
        quality_module = QualityModule(files, genome_size_lower_bound, genome_size_upper_bound, minimum_N50_value,
                                       minimum_contig_length, unacceptable_num_contigs)
return quality_module.create_quality_module_dataframe()
def _generate_empty_columns(self, row: list, max_cols: int, cur_cols: int) -> list:
if(cur_cols < max_cols):
for i in range(max_cols-cur_cols):
row.append('-')
return row
def _create_mlst_dataframe(self, mlst_data: str) -> DataFrame:
columns = ['Isolate ID', 'Scheme', 'Sequence Type']
curr_data = []
max_columns = 0
extension = None
mlst_split = mlst_data.splitlines()
# Parse and format the current row
for row in mlst_split:
array_format = re.split('\t', row)
num_columns = len(array_format)
# We want the file name without the extension
array_format[0] = path.basename(path.splitext(array_format[0])[0])
if max_columns < num_columns:
max_columns = num_columns
curr_data.append(array_format)
# Go through each row and append additional columns for the dataframes
curr_data = list(map(lambda x: self._generate_empty_columns(x, max_columns, len(x)), curr_data))
# Append Locus Column names if any
locus_columns = max_columns - len(columns)
if locus_columns > 0:
for x in range(0, locus_columns):
columns.append(("Locus {}").format(x+1))
mlst_dataframe = pd.DataFrame(curr_data, columns=columns)
mlst_dataframe = mlst_dataframe.set_index('Isolate ID')
return mlst_dataframe
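    # Illustrative note (not part of the original source): the `mlst` tool is assumed to emit
    # tab-separated rows such as
    #   /path/sample1.fasta<TAB>senterica<TAB>1<TAB>aroC(1)<TAB>dnaN(1)<TAB>...
    # which _create_mlst_dataframe turns into a row indexed by 'sample1' with 'Scheme',
    # 'Sequence Type' and one 'Locus N' column per allele call, padding shorter rows with '-'
    # via _generate_empty_columns.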
def run_amr_detection(self,files, pid_threshold, plength_threshold_resfinder, plength_threshold_pointfinder,
plength_threshold_plasmidfinder, genome_size_lower_bound,genome_size_upper_bound,
minimum_N50_value,minimum_contig_length,unacceptable_num_contigs,
report_all=False, ignore_invalid_files=False, mlst_scheme=None) -> None:
"""
Scans the passed files for AMR genes.
:param files: The files to scan.
:param pid_threshold: The percent identity threshold for BLAST results.
:param plength_threshold_resfinder: The percent length overlap for BLAST results (resfinder).
:param plength_threshold_pointfinder: The percent length overlap for BLAST results (pointfinder).
:param plength_threshold_plasmidfinder: The percent length overlap for BLAST results (plasmidfinder).
:param genome_size_lower_bound: The lower bound for the genome size as defined by the user for quality metrics
:param genome_size_upper_bound: The upper bound for the genome size as defined by the user for quality metrics
:param minimum_N50_value: The minimum N50 value as defined by the user for quality metrics
:param minimum_contig_length: The minimum contig length as defined by the user for quality metrics
:param unacceptable_num_contigs: The number of contigs in a file, equal to or above our minimum contig length, for which to raise a flag as defined by the user for quality metrics
:param report_all: Whether or not to report all blast hits.
:param ignore_invalid_files: Skips the invalid input files if set.
        :param mlst_scheme: Specifies the scheme name MLST uses, if set.
:return: None
"""
files_copy = copy.deepcopy(files)
files = self._validate_files(files_copy, ignore_invalid_files)
        self._quality_module_dataframe = self.create_quality_module_dataframe(files, genome_size_lower_bound, genome_size_upper_bound,
                                                                               minimum_N50_value, minimum_contig_length, unacceptable_num_contigs)
self._amr_detection_handler.run_blasts_mlst(files, mlst_scheme)
resfinder_blast_map = self._amr_detection_handler.get_resfinder_outputs()
self._resfinder_dataframe = self._create_resfinder_dataframe(resfinder_blast_map, pid_threshold,
plength_threshold_resfinder, report_all)
plasmidfinder_blast_map = self._amr_detection_handler.get_plasmidfinder_outputs()
self._plasmidfinder_dataframe = self._create_plasmidfinder_dataframe(plasmidfinder_blast_map, pid_threshold,
plength_threshold_plasmidfinder,
report_all)
mlst_data = self._amr_detection_handler.get_mlst_outputs()
self._mlst_dataframe = self._create_mlst_dataframe(mlst_data)
self._pointfinder_dataframe = None
if self._has_pointfinder:
pointfinder_blast_map = self._amr_detection_handler.get_pointfinder_outputs()
self._pointfinder_dataframe = self._create_pointfinder_dataframe(pointfinder_blast_map, pid_threshold,
plength_threshold_pointfinder, report_all)
self._summary_dataframe = self._create_amr_summary(files, self._resfinder_dataframe,self._quality_module_dataframe,
self._pointfinder_dataframe, self._plasmidfinder_dataframe, self._mlst_dataframe)
self._detailed_summary_dataframe = self._create_detailed_amr_summary(files, self._resfinder_dataframe,self._quality_module_dataframe,
self._pointfinder_dataframe,
self._plasmidfinder_dataframe,
self._mlst_dataframe)
def _validate_files(self, files: List[str], ignore_invalid_files: bool) -> List[str]:
total_files = len(files)
removeable_files = []
for file in files:
# Check if the file is not a directory
if os.path.isdir(file):
if ignore_invalid_files:
logger.warning('--ignore-invalid-files is set, skipping directory {}'.format(file))
removeable_files.append(file)
else:
raise Exception(
'Directory {} is invalid, please use --ignore-invalid-files to skip over this directory'.format(
file))
else:
                # any() returns False when SeqIO.parse() yields no records, which is the case for non-FASTA input files
validInput = any(SeqIO.parse(file, "fasta"))
if not validInput:
if ignore_invalid_files:
logger.warning('--ignore-invalid-files is set, skipping file {}'.format(file))
removeable_files.append(file)
else:
raise Exception(
'File {} is invalid, please use --ignore-invalid-files to skip over invalid input files'.format(
file))
else:
# Check if there are any duplicate sequence id's in the valid files
record = []
# Store all the sequence id's in a list
for sequence in SeqIO.parse(file, "fasta"):
record.append(sequence.id)
duplicates = []
# Each sequence contains a tuple (sequence id, frequency)
for sequence in (Counter(record)).items():
if sequence[1] > 1:
# We want the sequence id's that are duplicates
duplicates.append(sequence[0])
# Raise an error if there's any duplicates in the file
if len(duplicates) > 0:
raise Exception(
'File {} contains the following duplicate sequence IDs: {}'.format(file, duplicates))
# Check to see if the invalid file is not the only file in the directory
if total_files == len(removeable_files):
raise Exception('Cannot produce output due to no valid input files')
# Remove the skipped files
if ignore_invalid_files:
for file in removeable_files:
files.remove(file)
return files
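    # Example of the duplicate-ID check above (illustrative): Counter(['seq1', 'seq2', 'seq1']).items()
    # yields pairs like ('seq1', 2) and ('seq2', 1), so 'seq1' (count > 1) would be reported as a
    # duplicate sequence ID for that file.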
def get_mlst_results(self):
"""
Gets a pd.DataFrame for the MLST results.
:return: A pd.DataFrame for the MLST results.
"""
return self._mlst_dataframe
def get_resfinder_results(self):
"""
Gets a pd.DataFrame for the ResFinder results.
:return: A pd.DataFrame for the ResFinder results.
"""
return self._resfinder_dataframe
def get_pointfinder_results(self):
"""
Gets a pd.DataFrame for the PointFinder results.
:return: A pd.DataFrame for the PointFinder results.
"""
return self._pointfinder_dataframe
def get_plasmidfinder_results(self):
"""
Gets a pd.DataFrame for the PlasmidFinder results.
:return: A pd.DataFrame for the PlasmidFinder results.
"""
self._plasmidfinder_dataframe = self._plasmidfinder_dataframe.rename({'Gene':'Plasmid'}, axis=1)
return self._plasmidfinder_dataframe
def get_summary_results(self):
"""
Gets a pd.DataFrame for a summary table of the results.
:return: A pd.DataFrame for a summary table of the results.
"""
return self._summary_dataframe
def get_detailed_summary_results(self):
"""
Gets a pd.DataFrame for a detailed summary table of the results.
:return: A pd.DataFrame for a detailed summary table of the results.
"""
self._detailed_summary_dataframe = self._detailed_summary_dataframe.rename({'Gene':'Data'}, axis=1)
return self._detailed_summary_dataframe
```
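A minimal usage sketch of the `AMRDetection` class defined above (not part of the repository itself): the database and `JobHandler` wiring mirrors the integration test `test_AMRDetectionMLST.py` further below, while the input file name, thread count, and threshold/quality-metric values are placeholders chosen only for illustration.
```python
# Illustrative only -- this block is not part of the staramr source. The database and
# JobHandler setup follows test_AMRDetectionMLST.py below; '2' is an assumed thread count
# and 'genome.fasta' plus the numeric thresholds are placeholder values.
import tempfile

from staramr.blast.JobHandler import JobHandler
from staramr.blast.plasmidfinder.PlasmidfinderBlastDatabase import PlasmidfinderBlastDatabase
from staramr.blast.resfinder.ResfinderBlastDatabase import ResfinderBlastDatabase
from staramr.databases.AMRDatabasesManager import AMRDatabasesManager
from staramr.detection.AMRDetection import AMRDetection

repos = AMRDatabasesManager.create_default_manager().get_database_repos()
resfinder_db = ResfinderBlastDatabase(repos.get_repo_dir('resfinder'))
plasmidfinder_db = PlasmidfinderBlastDatabase(repos.get_repo_dir('plasmidfinder'))

blast_out = tempfile.TemporaryDirectory()
handler = JobHandler({'resfinder': resfinder_db, 'pointfinder': None,
                      'plasmidfinder': plasmidfinder_db}, 2, blast_out.name)

detection = AMRDetection(resfinder_db, handler, plasmidfinder_database=plasmidfinder_db)
# Arguments follow run_amr_detection(): pid threshold, the three percent-length thresholds,
# genome size bounds, minimum N50, minimum contig length and the contig-count flag threshold.
detection.run_amr_detection(['genome.fasta'], 98, 60, 95, 60, 4000000, 6000000, 10000, 300, 1000)
print(detection.get_summary_results())
blast_out.cleanup()
```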
#### File: staramr/detection/AMRDetectionResistance.py
```python
from typing import List, Optional
from pandas import DataFrame
from staramr.blast.results.plasmidfinder.BlastResultsParserPlasmidfinderResistance import \
BlastResultsParserPlasmidfinderResistance
from staramr.blast.results.pointfinder.BlastResultsParserPointfinder import BlastResultsParserPointfinder
from staramr.blast.results.pointfinder.BlastResultsParserPointfinderResistance import \
BlastResultsParserPointfinderResistance
from staramr.blast.results.resfinder.BlastResultsParserResfinderResistance import BlastResultsParserResfinderResistance
from staramr.detection.AMRDetection import AMRDetection
from staramr.results.AMRDetectionSummaryResistance import AMRDetectionSummaryResistance
"""
A Class to handle scanning files for AMR genes and also include phenotypes/resistances in the results.
"""
class AMRDetectionResistance(AMRDetection):
def __init__(self, resfinder_database, arg_drug_table_resfinder, amr_detection_handler, arg_drug_table_pointfinder,
pointfinder_database=None, include_negative_results=False, output_dir=None, genes_to_exclude=[],
plasmidfinder_database=None):
"""
Builds a new AMRDetectionResistance.
:param resfinder_database: The staramr.blast.resfinder.ResfinderBlastDatabase for the particular ResFinder database.
:param arg_drug_table_resfinder: The staramr.databases.resistance.ARGDrugTable for searching for resfinder resistances.
:param amr_detection_handler: The staramr.blast.JobHandler to use for scheduling BLAST jobs.
:param arg_drug_table_pointfinder: The staramr.databases.resistance.ARGDrugTable for searching for pointfinder resistances.
:param pointfinder_database: The staramr.blast.pointfinder.PointfinderBlastDatabase to use for the particular PointFinder database.
:param include_negative_results: If True, include files lacking AMR genes in the resulting summary table.
:param output_dir: The directory where output fasta files are to be written into (None for no output fasta files).
:param genes_to_exclude: A list of gene IDs to exclude from the results.
"""
super().__init__(resfinder_database, amr_detection_handler, pointfinder_database, include_negative_results,
output_dir=output_dir, genes_to_exclude=genes_to_exclude,
plasmidfinder_database=plasmidfinder_database)
self._arg_drug_table_resfinder = arg_drug_table_resfinder
self._arg_drug_table_pointfinder = arg_drug_table_pointfinder
def _create_resfinder_dataframe(self, resfinder_blast_map, pid_threshold, plength_threshold, report_all):
resfinder_parser = BlastResultsParserResfinderResistance(resfinder_blast_map, self._arg_drug_table_resfinder,
self._resfinder_database, pid_threshold,
plength_threshold, report_all,
output_dir=self._output_dir,
genes_to_exclude=self._genes_to_exclude)
return resfinder_parser.parse_results()
def _create_pointfinder_dataframe(self, pointfinder_blast_map, pid_threshold, plength_threshold, report_all):
pointfinder_parser = BlastResultsParserPointfinderResistance(pointfinder_blast_map,
self._arg_drug_table_pointfinder,
self._pointfinder_database,
pid_threshold, plength_threshold, report_all,
output_dir=self._output_dir,
genes_to_exclude=self._genes_to_exclude)
return pointfinder_parser.parse_results()
def _create_plasmidfinder_dataframe(self, plasmidfinder_blast_map, pid_threshold, plength_threshold, report_all):
plasmidfinder_parser = BlastResultsParserPlasmidfinderResistance(plasmidfinder_blast_map,
self._plasmidfinder_database,
pid_threshold,
plength_threshold, report_all,
output_dir=self._output_dir,
genes_to_exclude=self._genes_to_exclude)
return plasmidfinder_parser.parse_results()
def _create_amr_summary(self, files, resfinder_dataframe, quality_module_dataframe,pointfinder_dataframe, plasmidfinder_dataframe, mlst_dataframe):
amr_detection_summary = AMRDetectionSummaryResistance(files, resfinder_dataframe,quality_module_dataframe,pointfinder_dataframe,
plasmidfinder_dataframe, mlst_dataframe)
return amr_detection_summary.create_summary(self._include_negative_results)
def _create_detailed_amr_summary(self, files: List[str], resfinder_dataframe: DataFrame, quality_module_dataframe: DataFrame,
pointfinder_dataframe: Optional[BlastResultsParserPointfinder],
plasmidfinder_dataframe: DataFrame, mlst_dataframe) -> DataFrame:
amr_detection_summary = AMRDetectionSummaryResistance(files, resfinder_dataframe,quality_module_dataframe,
pointfinder_dataframe, plasmidfinder_dataframe, mlst_dataframe)
return amr_detection_summary.create_detailed_summary(self._include_negative_results)
```
#### File: staramr/exceptions/DatabaseNotFoundException.py
```python
class DatabaseNotFoundException(Exception):
def __init__(self, msg):
"""
Constructs a new DatabaseNotFoundException
:param msg: The Exception message.
"""
super().__init__(msg)
```
#### File: integration/detection/test_AMRDetectionMLST.py
```python
import logging
import tempfile
import unittest
from os import path
from Bio import SeqIO
from staramr.blast.JobHandler import JobHandler
from staramr.blast.plasmidfinder.PlasmidfinderBlastDatabase import PlasmidfinderBlastDatabase
from staramr.blast.resfinder.ResfinderBlastDatabase import ResfinderBlastDatabase
from staramr.databases.AMRDatabasesManager import AMRDatabasesManager
from staramr.databases.resistance.resfinder.ARGDrugTableResfinder import ARGDrugTableResfinder
from staramr.databases.resistance.pointfinder.ARGDrugTablePointfinder import ARGDrugTablePointfinder
from staramr.detection.AMRDetectionResistance import AMRDetectionResistance
logger = logging.getLogger('AMRDetectionMLST')
class AMRDetectionMLST(unittest.TestCase):
def setUp(self):
blast_databases_repositories = AMRDatabasesManager.create_default_manager().get_database_repos()
self.resfinder_dir = blast_databases_repositories.get_repo_dir(
'resfinder')
self.pointfinder_dir = blast_databases_repositories.get_repo_dir(
'pointfinder')
self.plasmidfinder_dir = blast_databases_repositories.get_repo_dir(
'plasmidfinder')
self.resfinder_database = ResfinderBlastDatabase(self.resfinder_dir)
self.resfinder_drug_table = ARGDrugTableResfinder()
self.pointfinder_drug_table = ARGDrugTablePointfinder()
self.plasmidfinder_database = PlasmidfinderBlastDatabase(
self.plasmidfinder_dir)
self.pointfinder_database = None
self.blast_out = tempfile.TemporaryDirectory()
self.blast_handler = JobHandler(
{'resfinder': self.resfinder_database, 'pointfinder': self.pointfinder_database,
'plasmidfinder': self.plasmidfinder_database}, 2, self.blast_out.name)
self.outdir = tempfile.TemporaryDirectory()
self.amr_detection = AMRDetectionResistance(self.resfinder_database, self.resfinder_drug_table,
self.blast_handler, self.pointfinder_drug_table,
self.pointfinder_database, output_dir=self.outdir.name)
self.test_data_dir = path.join(path.dirname(__file__), '..', 'data')
def tearDown(self):
self.blast_out.cleanup()
self.outdir.cleanup()
def testMLSTResults(self):
file = path.join(self.test_data_dir, "test-mlst-summary.fsa")
files = [file]
self.amr_detection.run_amr_detection(files, 99, 90, 90, 90,0,0,0,0,0)
mlst_results = self.amr_detection.get_mlst_results()
self.assertEqual(len(mlst_results.index), 1, 'Wrong number of results detected')
self.assertEqual(len(mlst_results.columns), 9, 'Wrong number of columns detected')
self.assertEqual(mlst_results['Scheme'].iloc[0], 'senterica', msg='Wrong Scheme')
self.assertEqual(mlst_results['Sequence Type'].iloc[0], '1', msg='Wrong Sequence Type')
self.assertEqual(mlst_results['Locus 1'].iloc[0], 'aroC(1)', msg='Wrong Locus 1 Result')
self.assertEqual(mlst_results['Locus 2'].iloc[0], 'dnaN(1)', msg='Wrong Locus 2 Result')
self.assertEqual(mlst_results['Locus 3'].iloc[0], 'hemD(1)', msg='Wrong Locus 3 Result')
self.assertEqual(mlst_results['Locus 4'].iloc[0], 'hisD(1)', msg='Wrong Locus 4 Result')
self.assertEqual(mlst_results['Locus 5'].iloc[0], 'purE(1)', msg='Wrong Locus 5 Result')
self.assertEqual(mlst_results['Locus 6'].iloc[0], 'sucA(1)', msg='Wrong Locus 6 Result')
self.assertEqual(mlst_results['Locus 7'].iloc[0], 'thrA(5)', msg='Wrong Locus 7 Result')
def testNoMLSTResults(self):
file = path.join(self.test_data_dir, "gyrA-S97N.fsa")
files = [file]
self.amr_detection.run_amr_detection(files, 99, 90, 90, 90,0,0,0,0,0)
mlst_results = self.amr_detection.get_mlst_results()
self.assertEqual(len(mlst_results.index), 1, 'Wrong number of results detected')
self.assertEqual(len(mlst_results.columns), 2, 'Wrong number of columns detected')
self.assertEqual(mlst_results['Scheme'].iloc[0], '-', msg='Scheme is found, expected none')
self.assertEqual(mlst_results['Sequence Type'].iloc[0], '-', msg='Sequence Type is found, expected none')
```
#### File: pointfinder/codon/test_CodonMutationPosition.py
```python
import unittest
from staramr.blast.results.pointfinder.codon.CodonMutationPosition import CodonMutationPosition
class CodonMutationPositionTest(unittest.TestCase):
def testMutationPositionStartCodon1(self):
mutation_position = 0
# @formatter:off
database_amr_gene_string = "ATCGATCGA"
input_genome_string = "TTCGATCGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
        self.assertEqual(mutation.get_nucleotide_position(), 1, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'TTC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'F', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1F', 'Incorrect string')
def testMutationPositionMiddleCodon1(self):
mutation_position = 1
# @formatter:off
database_amr_gene_string = "ATCGATCGA"
input_genome_string = "AGCGATCGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 2, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'AGC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'S', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1S', 'Incorrect string')
def testMutationPositionEndCodon1(self):
mutation_position = 2
# @formatter:off
database_amr_gene_string = "ATCGATCGA"
input_genome_string = "ATGGATCGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 3, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATG', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'M', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1M', 'Incorrect string')
def testMutationPositionStartCodon2(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "ATCGATCGA"
input_genome_string = "ATCAATCGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'GAT', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'AAT', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'D', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'N', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'D2N', 'Incorrect string')
def testMutationPositionEndCodon2(self):
mutation_position = 5
# @formatter:off
database_amr_gene_string = "ATCGATCGA"
input_genome_string = "ATCGACCGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 6, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'GAT', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'GAC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'D', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'D', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'D2D', 'Incorrect string')
def testMutationPositionStartCodon3(self):
mutation_position = 6
# @formatter:off
database_amr_gene_string = "ATCGATCGA"
input_genome_string = "ATCGATGGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 7, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 3, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 3, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'CGA', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'GGA', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'R', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'G', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'R3G', 'Incorrect string')
def testMutationPositionStartCodon1StartMethionine(self):
mutation_position = 0
# @formatter:off
database_amr_gene_string = "ATCGATCGA"
input_genome_string = "ATGGATCGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 1, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATG', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'M', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1M', 'Incorrect string')
def testMutationPositionStartCodon1Stop(self):
mutation_position = 2
# @formatter:off
database_amr_gene_string = "TACGATCGA"
input_genome_string = "TAAGATCGA"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 3, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'TAC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'TAA', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'Y', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), '*', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'Y1*', 'Incorrect string')
def testMutationPositionGapStart(self):
mutation_position = 0
# @formatter:off
database_amr_gene_string = "ATCG"
input_genome_string = "-TCG"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 1, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), '-TC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1del', 'Incorrect string')
def testMutationPositionGapMiddle(self):
mutation_position = 1
# @formatter:off
database_amr_gene_string = "ATCG"
input_genome_string = "A-CG"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 2, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'A-C', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1del', 'Incorrect string')
def testMutationPositionGapEnd(self):
mutation_position = 2
# @formatter:off
database_amr_gene_string = "ATCG"
input_genome_string = "AT-G"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 3, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'AT-', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1del', 'Incorrect string')
def testMutationPositionGapMiddleEnd(self):
mutation_position = 2
# @formatter:off
database_amr_gene_string = "ATCGG"
input_genome_string = "AT--G"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 3, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'AT-', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I1del', 'Incorrect string')
def testMutationPositionGapStartMiddleEnd(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "CCCATCGAC"
input_genome_string = "CCC---GAC"
        # @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), '---', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I2del', 'Incorrect string')
def testMutationPositionGapPreviousCodon(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "CCCATCGACT"
input_genome_string = "CC----GACT"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), '---', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I2del', 'Incorrect string')
def testMutationPositionGapLargerPreviousCodon(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "CCCATCGACTT"
input_genome_string = "C-----GACTT"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), '---', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I2del', 'Incorrect string')
def testMutationPositionGapBefore(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "CCCATCGAC"
input_genome_string = "-CCA--GAC"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'A--', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I2del', 'Incorrect string')
def testMutationPositionGapBeforeAfter(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "CCCATCGACT"
input_genome_string = "-CCA--GA-T"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'ATC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'A--', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'I', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'del', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'I2del', 'Incorrect string')
def testMutationPositionGapReferenceStart(self):
mutation_position = 0
# @formatter:off
database_amr_gene_string = "-TCG"
input_genome_string = "ATCG"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 1, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), '-TC', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins1I', 'Incorrect string')
def testMutationPositionGapReferenceMiddle(self):
mutation_position = 1
# @formatter:off
database_amr_gene_string = "A-CG"
input_genome_string = "ATCG"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 2, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'A-C', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins1I', 'Incorrect string')
def testMutationPositionGapReferenceEnd(self):
mutation_position = 2
# @formatter:off
database_amr_gene_string = "AT-G"
input_genome_string = "ATCG"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 3, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 1, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 1, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'AT-', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins1I', 'Incorrect string')
def testMutationPositionGapReferenceStartMiddleEnd(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "CCC---GAC"
input_genome_string = "CCCATCGAC"
        # @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), '---', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins2I', 'Incorrect string')
def testMutationPositionGapReferencePreviousCodon(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "CC----GACT"
input_genome_string = "CCCATCGACT"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), '---', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins2I', 'Incorrect string')
def testMutationPositionGapReferenceLargerPreviousCodon(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "C-----GACTT"
input_genome_string = "CCCATCGACTT"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), '---', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins2I', 'Incorrect string')
def testMutationPositionGapReferenceBefore(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "-CCA--GAC"
input_genome_string = "CCCATCGAC"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'A--', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins2I', 'Incorrect string')
def testMutationPositionGapReferenceBeforeAfter(self):
mutation_position = 3
# @formatter:off
database_amr_gene_string = "-CCA--GA-T"
input_genome_string = "CCCATCGACT"
# @formatter:on
amr_gene_start = 1
mutation = CodonMutationPosition(mutation_position, database_amr_gene_string, input_genome_string,
amr_gene_start)
self.assertEqual(mutation.get_nucleotide_position(), 4, 'Incorrect nucleotide position')
self.assertEqual(mutation.get_codon_start(), 2, 'Incorrect codon start')
self.assertEqual(mutation.get_mutation_position(), 2, 'Incorrect mutation start')
self.assertEqual(mutation.get_database_amr_gene_codon(), 'A--', 'Incorrect database codon')
self.assertEqual(mutation.get_input_genome_codon(), 'ATC', 'Incorrect query codon')
self.assertEqual(mutation.get_database_amr_gene_mutation(), 'ins', 'Incorrect database amino acid')
self.assertEqual(mutation.get_input_genome_mutation(), 'I', 'Incorrect query amino acid')
self.assertEqual(mutation.get_mutation_string_short(), 'ins2I', 'Incorrect string')
```
#### File: results/pointfinder/test_PointfinderHitHSP.py
```python
import unittest
import pandas as pd
from staramr.blast.results.pointfinder.PointfinderHitHSP import PointfinderHitHSP
from staramr.exceptions.InvalidPositionException import InvalidPositionException
class PointfinderHitHSPTest(unittest.TestCase):
def testBuildPointfinderHitHSPSuccess(self):
blast_record = pd.Series({'sstart': 1, 'send': 10, 'qstart': 1, 'qend': 10, 'sstrand': 'plus'})
PointfinderHitHSP(file=None, blast_record=blast_record)
def testBuildPointfinderHitHSPFailInvalidSubjectCoords(self):
blast_record = pd.Series({'sstart': 10, 'send': 1, 'qstart': 1, 'qend': 10, 'sstrand': 'plus'})
self.assertRaises(InvalidPositionException, PointfinderHitHSP, None, blast_record)
def testBuildPointfinderHitHSPInvalidQueryCoords(self):
blast_record = pd.Series({'sstart': 1, 'send': 10, 'qstart': 10, 'qend': 1, 'sstrand': 'plus'})
self.assertRaises(InvalidPositionException, PointfinderHitHSP, None, blast_record)
```
|
{
"source": "JeffreyTsang/Brickbreaker",
"score": 3
}
|
#### File: JeffreyTsang/Brickbreaker/colormodel.py
```python
import colorsys
# To handle round off error
_epsilon = 1e-13
class RGB(object):
"""An instance is a RGB color value."""
# MUTABLE ATTRIBUTES
@property
def red(self):
"""The red channel.
**Invariant**: Value must be an int between 0 and 255, inclusive."""
return self._red
@red.setter
def red(self, value):
assert (type(value) == int), "value %s is not an int" % `value`
assert (value >= 0 and value <= 255), "value %s is outside of range [0,255]" % `value`
self._red = value
@red.deleter
def red(self):
del self._red
@property
def green(self):
"""The green channel.
**Invariant**: Value must be an int between 0 and 255, inclusive."""
return self._green
@green.setter
def green(self, value):
assert (type(value) == int), "value %s is not an int" % `value`
assert (value >= 0 and value <= 255), "value %s is outside of range [0,255]" % `value`
self._green = value
@green.deleter
def green(self):
del self._green
@property
def blue(self):
"""The blue channel.
**Invariant**: Value must be an int between 0 and 255, inclusive."""
return self._blue
@blue.setter
def blue(self, value):
assert (type(value) == int), "value %s is not an int" % `value`
assert (value >= 0 and value <= 255), "value %s is outside of range [0,255]" % `value`
self._blue = value
@blue.deleter
def blue(self):
del self._blue
@property
def alpha(self):
"""The alpha channel.
Used for transparency effects (but not in this course).
**Invariant**: Value must be an int between 0 and 255, inclusive."""
return self._alpha
@alpha.setter
def alpha(self, value):
assert (type(value) == int), "value %s is not an int" % `value`
assert (value >= 0 and value <= 255), "value %s is outside of range [0,255]" % `value`
self._alpha = value
@alpha.deleter
def alpha(self):
del self._alpha
# BUILT-IN METHODS
def __init__(self, r, g, b, a=255):
"""**Constructor**: creates a new RGB value (r,g,b,a).
:param r: initial red value
**Precondition**: int between 0 and 255, inclusive.
:param g: initial green value
**Precondition**: int between 0 and 255, inclusive.
:param b: initial blue value
**Precondition**: int between 0 and 255, inclusive.
:param a: initial alpha value (default 255)
**Precondition**: int between 0 and 255, inclusive.
The alpha channel is 255 by default, unless otherwise specified."""
self.red = r
self.green = g
self.blue = b
self.alpha = a
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent RGB colors. """
return (type(other) == RGB and self.red == other.red and
self.green == other.green and self.blue == other.blue and
self.alpha == other.alpha)
def __ne__(self, other):
"""**Returns**: True if self and other are not equivalent RGB colors. """
return (type(other) != RGB or self.red != other.red or
self.green != other.green or self.blue != other.blue or
self.alpha != other.alpha)
def __str__(self):
"""**Returns**: A readable string representation of this color. """
return "("+str(self.red)+","+str(self.green)+","+str(self.blue)+","+str(self.alpha)+")"
def __repr__(self):
"""**Returns**: An unambiguous String representation of this color. """
return "(red="+str(self.red)+",green="+str(self.green)+",blue="+str(self.blue)+",alpha="+str(self.alpha)+")"
# PUBLIC METHODS
def glColor(self):
"""**Returns**: a 4 element list of the attributes in the range 0 to 1
This is a conversion of this object into a format that can be used in
openGL graphics"""
return [self.red/255.0, self.green/255.0, self.blue/255.0, self.alpha/255.0]
def tkColor(self):
"""**Returns**: a 3 element tuple of the attributes in the range 0 to 1
This is a conversion of this object into a format that can be used by
Tkinter (e.g. the drawing turtle)."""
return (self.red/255.0, self.green/255.0, self.blue/255.0)
# CLASS METHODS FOR TKinter SUPPORT
@classmethod
def CreateName(cls,name):
"""Creates a new RGB object with the given color name.
Color name conversion is handled by the standard RGB color space.
:param name: the color name
**Precondition**: name is a valid tkinter color name
"""
assert name in _TK_COLOR_MAP, "parameter %s is not a valid color name" % `name`
return cls.CreateWebColor(_TK_COLOR_MAP[name])
@classmethod
def CreateWebColor(cls,color):
"""Creates a new RGB object from the given web color string.
A web color string is a 6-digit hexadecimal string starting with a
hashtag (#). It does not include an alpha value.
:param color: the web color
**Precondition**: color is a valid web color string
"""
assert type(color) == str, "paramter %s is not a string" % `color`
assert color[0] == '#' and len(color) == 7, "paramter % is not a valid web color" % `color`
try:
red = int(color[1:3],16)
except:
assert false, "red value %s is out of range" % `color[1:3]`
try:
green = int(color[3:5],16)
except:
assert false, "green value %s is out of range" % `color[3:5]`
try:
blue = int(color[5:7],16)
except:
assert false, "green value %s is out of range" % `color[5:7]`
return cls(red,green,blue)
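    # Illustrative example for the RGB class (not part of the original module): building an
    # RGB value from a web color string and converting it for openGL/Tkinter use.
    #   c = RGB.CreateWebColor('#FF8000')   # -> (red=255,green=128,blue=0,alpha=255)
    #   c.glColor()                         # -> [1.0, 0.50196..., 0.0, 1.0]
    #   c.tkColor()                         # -> (1.0, 0.50196..., 0.0)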
class CMYK(object):
"""An instance is a CMYK color value."""
# MUTABLE ATTRIBUTES
@property
def cyan(self):
"""The cyan channel.
**Invariant**: Value must be a float between 0.0 and 100.0, inclusive."""
return self._cyan
@cyan.setter
def cyan(self, value):
assert (type(value) == int or type(value) == float), "value %s is not a number" % `value`
if (value > 100.0):
value = min(value,100.0) if value < 100.0+_epsilon else value
if (value < 0.0):
value = max(value,0.0) if value > -_epsilon else value
assert (value >= 0.0 and value <= 100.0), "value %s is outside of range [0.0,100.0]" % `value`
self._cyan = float(value)
@cyan.deleter
def cyan(self):
del self._cyan
@property
def magenta(self):
"""The magenta channel.
**Invariant**: Value must be a float between 0.0 and 100.0, inclusive."""
return self._magenta
@magenta.setter
def magenta(self, value):
assert (type(value) == int or type(value) == float), "value %s is not a number" % `value`
if (value > 100.0):
value = min(value,100.0) if value < 100.0+_epsilon else value
if (value < 0.0):
value = max(value,0.0) if value > -_epsilon else value
assert (value >= 0.0 and value <= 100.0), "value %s is outside of range [0.0,100.0]" % `value`
self._magenta = float(value)
@magenta.deleter
def magenta(self):
del self._magenta
@property
def yellow(self):
"""The yellow channel.
**Invariant**: Value must be a float between 0.0 and 100.0, inclusive."""
return self._yellow
@yellow.setter
def yellow(self, value):
assert (type(value) == int or type(value) == float), "value %s is not a number" % `value`
if (value > 100.0):
value = min(value,100.0) if value < 100.0+_epsilon else value
if (value < 0.0):
value = max(value,0.0) if value > -_epsilon else value
assert (value >= 0.0 and value <= 100.0), "value %s is outside of range [0.0,100.0]" % `value`
self._yellow = float(value)
@yellow.deleter
def yellow(self):
del self._yellow
@property
def black(self):
"""The black channel.
**Invariant**: Value must be a float between 0.0 and 100.0, inclusive."""
return self._black
@black.setter
def black(self, value):
assert (type(value) == int or type(value) == float), "value %s is not a number" % `value`
if (value > 100.0):
value = min(value,100.0) if value < 100.0+_epsilon else value
if (value < 0.0):
value = max(value,0.0) if value > -_epsilon else value
        assert (value >= 0.0 and value <= 100.0), "value %s is outside of range [0.0,100.0]" % `value`
self._black = float(value)
@black.deleter
def black(self):
del self._black
# BUILT-IN METHODS
def __init__(self, c, m, y, k):
"""**Constructor**: creates a new CMYK color (c,m,y,k).
:param c: initial cyan value
**Precondition**: float between 0.0 and 100.0, inclusive.
:param m: initial magenta value
**Precondition**: float between 0.0 and 100.0, inclusive.
:param y: initial yellow value
**Precondition**: float between 0.0 and 100.0, inclusive.
:param k: initial black value
**Precondition**: float between 0.0 and 100.0, inclusive.
No arguments are optional."""
self.cyan = c
self.magenta = m
self.yellow = y
self.black = k
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent CMYK colors. """
return (type(other) == CMYK and self.cyan == other.cyan and
self.magenta == other.magenta and self.yellow == other.yellow and
self.black == other.black)
def __ne__(self, other):
"""**Returns**: True if self and other are not equivalent CMYK colors. """
return (type(other) != CMYK or self.cyan != other.cyan or
self.magenta != other.magenta or self.yellow != other.yellow or
self.black != other.black)
def __str__(self):
"""**Returns**: A readable String representation of this color. """
return "("+str(self.cyan)+","+str(self.magenta)+","+str(self.yellow)+","+str(self.black)+")"
def __repr__(self):
"""**Returns**: An unambiguous String representation of this color. """
return "(cyan="+str(self.cyan)+",magenta="+str(self.magenta)+",yellow="+str(self.yellow)+",black="+str(self.black)+")"
class HSV(object):
"""An instance is a HSV color value."""
# MUTABLE ATTRIBUTES
@property
def hue(self):
"""The hue channel.
**Invariant**: Value must be a float between 0.0 and 360.0, not including 360.0."""
return self._hue
@hue.setter
def hue(self, value):
assert (type(value) == int or type(value) == float), "value %s is not a number" % `value`
if (value < 0.0):
value = max(value,0.0) if value > -_epsilon else value
assert (value >= 0.0 and value < 360.0), "value %s is outside of range [0.0,360.0)" % `value`
self._hue = float(value)
@hue.deleter
def hue(self):
del self._hue
@property
def saturation(self):
"""The staturation channel.
**Invariant**: Value must be a float between 0.0 and 1.0, inclusive."""
return self._saturation
@saturation.setter
def saturation(self, value):
assert (type(value) == int or type(value) == float), "value %s is not a number" % `value`
if (value > 1.0):
value = min(value,1.0) if value < 1.0+_epsilon else value
if (value < 0.0):
value = max(value,0.0) if value > -_epsilon else value
assert (value >= 0.0 and value <= 1.0), "value %s is outside of range [0.0,1.0]" % `value`
self._saturation = float(value)
@saturation.deleter
def saturation(self):
del self._saturation
@property
def value(self):
"""The value channel.
**Invariant**: Value must be a float between 0.0 and 1.0, inclusive."""
return self._value
@value.setter
def value(self, val):
assert (type(val) == int or type(val) == float), "value %s is not a number" % `val`
if (val > 1.0):
val = min(val,1.0) if val < 1.0+_epsilon else val
if (val < 0.0):
val = max(val,0.0) if val > -_epsilon else val
assert (val >= 0.0 and val <= 1.0), "value %s is outside of range [0.0,1.0]" % `val`
self._value = float(val)
@value.deleter
def value(self):
del self._value
# BUILT-IN METHODS
def __init__(self, h, s, v):
"""**Constructor**: creates a new HSV color (h,s,v).
:param h: the initial hue
        **Precondition**: float between 0.0 and 360.0, not including 360.0.
:param s: the initial saturation
**Precondition**: float between 0.0 and 1.0, inclusive.
:param v: the initial value
**Precondition**: float between 0.0 and 1.0, inclusive.
No arguments are optional."""
self.hue = h
self.saturation = s
self.value = v
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent HSV colors. """
return (type(other) == HSV and self.hue == other.hue and
self.saturation == other.saturation and self.value == other.value)
def __ne__(self, other):
"""**Returns**: True if self and other are equivalent HSV colors. """
return (type(other) != HSV or self.hue != other.hue or
self.saturation != other.saturation or self.value != other.value)
def __str__(self):
"""**Returns**: A readable String representation of this color. """
return "("+str(self.hue)+","+str(self.saturation)+","+str(self.value)+")"
def __repr__(self):
"""**Returns**: An unambiguous String representation of this color. """
return "(hue="+str(self.hue)+",saturation="+str(self.saturation)+",value="+str(self.value)+")"
# PUBLIC METHODS
def glColor(self):
"""**Returns**: a 4 element list of the equivalent rgba color.
This method converts this object to an RGB object and then extracts
a 4 element list with color values between 0 and 1. This is a conversion
of this object into a format that can be used in openGL graphics"""
rgb = colorsys.hsv_to_rgb(self.hue/360.0,self.saturation,self.value)
return [rgb[0], rgb[1], rgb[2], 1.0]
def tkColor(self):
"""**Returns**: a 3 element tuple of the equivalent rgb color.
This method converts this object to an RGB object and then extracts
a 3 element tuple with color values between 0 and 1. This is a conversion
of this object into a format that can be used by the drawing turtle"""
return colorsys.hsv_to_rgb(self.hue/360.0,self.saturation,self.value)
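    # Illustrative example for the HSV class (not part of the original module): pure red in HSV.
    #   HSV(0.0, 1.0, 1.0).glColor()  # -> [1.0, 0.0, 0.0, 1.0], via colorsys.hsv_to_rgb
    #   HSV(0.0, 1.0, 1.0).tkColor()  # -> (1.0, 0.0, 0.0)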
# COLOR CONSTANTS
#: The color white in the default sRGB space.
WHITE = RGB(255, 255, 255)
#: The color light gray in the default sRGB space.
LIGHT_GRAY = RGB(192, 192, 192)
#: The color gray in the default sRGB space.
GRAY = RGB(128, 128, 128)
#: The color dark gray in the default sRGB space.
DARK_GRAY = RGB(64, 64, 64)
#: The color black in the default sRGB space.
BLACK = RGB(0, 0, 0)
#: The color red, in the default sRGB space.
RED = RGB(255, 0, 0)
#: The color pink in the default sRGB space.
PINK = RGB(255, 175, 175)
#: The color orange in the default sRGB space.
ORANGE = RGB(255, 200, 0)
#: The color yellow in the default sRGB space.
YELLOW = RGB(255, 255, 0)
#: The color green in the default sRGB space.
GREEN = RGB(0, 255, 0)
#: The color magenta in the default sRGB space.
MAGENTA = RGB(255, 0, 255)
#: The color cyan in the default sRGB space.
CYAN = RGB(0, 255, 255)
#: The color blue in the default sRGB space.
BLUE = RGB(0, 0, 255)
# TKinter COLOR SUPPORT
def is_color(name):
"""**Returns**: True if name is the name of a supported color
:param name: the color name
**Precondition**: name is a string"""
return name in _TK_COLOR_MAP
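# Illustrative usage (assumes the map below has been loaded):
#   is_color('alice blue')    # True
#   is_color('not a color')   # False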
# Unfortunately, this had to be done manually.
_TK_COLOR_MAP = {
'alice blue': '#F0F8FF',
'AliceBlue' : '#F0F8FF',
'antique white': '#FAEBD7',
'AntiqueWhite': '#FAEBD7',
'AntiqueWhite1': '#FFEFDB',
'AntiqueWhite2': '#EEDFCC',
'AntiqueWhite3': '#CDC0B0',
'AntiqueWhite4': '#8B8378',
'aquamarine': '#7FFFD4',
'aquamarine1': '#7FFFD4',
'aquamarine2': '#76EEC6',
'aquamarine3': '#66CDAA',
'aquamarine4': '#458B74',
'azure': '#F0FFFF',
'azure1': '#F0FFFF',
'azure2': '#E0EEEE',
'azure3': '#C1CDCD',
'azure4': '#838B8B',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'bisque1': '#FFE4C4',
'bisque2': '#EED5B7',
'bisque3': '#CDB79E',
'bisque4': '#8B7D6B',
'black': '#000000',
'blanched almond': '#FFEBCD',
'BlanchedAlmond': '#FFEBCD',
'blue': '#0000FF',
'blue violet': '#8A2BE2',
'blue1': '#0000FF',
'blue2': '#0000EE',
'blue3': '#0000CD',
'blue4': '#00008B',
'BlueViolet': '#8A2BE2',
'brown': '#A52A2A',
'brown1': '#FF4040',
'brown2': '#EE3B3B',
'brown3': '#CD3333',
'brown4': '#8B2323',
'burlywood': '#DEB887',
'burlywood1': '#FFD39B',
'burlywood2': '#EEC591',
'burlywood3': '#CDAA7D',
'burlywood4': '#8B7355',
'cadet blue': '#5F9EA0',
'CadetBlue': '#5F9EA0',
'CadetBlue1': '#98F5FF',
'CadetBlue2': '#8EE5EE',
'CadetBlue3': '#7AC5CD',
'CadetBlue4': '#53868B',
'chartreuse': '#7FFF00',
'chartreuse1': '#7FFF00',
'chartreuse2': '#76EE00',
'chartreuse3': '#66CD00',
'chartreuse4': '#458B00',
'chocolate': '#D2691E',
'chocolate1': '#FF7F24',
'chocolate2': '#EE7621',
'chocolate3': '#CD661D',
'chocolate4': '#8B4513',
'coral': '#FF7F50',
'coral1': '#FF7256',
'coral2': '#EE6A50',
'coral3': '#CD5B45',
'coral4': '#8B3E2F',
'cornflower blue': '#6495ED',
'CornflowerBlue': '#6495ED',
'cornsilk': '#FFF8DC',
'cornsilk1': '#FFF8DC',
'cornsilk2': '#EEE8CD',
'cornsilk3': '#CDC8B1',
'cornsilk4': '#8B8878',
'cyan': '#00FFFF',
'cyan1': '#00FFFF',
'cyan2': '#00EEEE',
'cyan3': '#00CDCD',
'cyan4': '#008B8B',
'dark blue': '#00008B',
'dark cyan': '#008B8B',
'dark goldenrod': '#B8860B',
'dark gray': '#A9A9A9',
'dark green': '#006400',
'dark grey': '#A9A9A9',
'dark khaki': '#BDB76B',
'dark magenta': '#8B008B',
'dark olive green': '#556B2F',
'dark orange': '#FF8C00',
'dark orchid': '#9932CC',
'dark red': '#8B0000',
'dark salmon': '#E9967A',
'dark sea green': '#8FBC8F',
'dark slate blue': '#483D8B',
'dark slate gray': '#2F4F4F',
'dark slate grey': '#2F4F4F',
'dark turquoise': '#00CED1',
'dark violet': '#9400D3',
'DarkBlue': '#00008B',
'DarkCyan': '#008B8B',
'DarkGoldenrod': '#B8860B',
'DarkGoldenrod1': '#FFB90F',
'DarkGoldenrod2': '#EEAD0E',
'DarkGoldenrod3': '#CD950C',
'DarkGoldenrod4': '#8B6508',
'DarkGray': '#A9A9A9',
'DarkGreen': '#006400',
'DarkGrey': '#A9A9A9',
'DarkKhaki': '#BDB76B',
'DarkMagenta': '#8B008B',
'DarkOliveGreen': '#556B2F',
'DarkOliveGreen1': '#CAFF70',
'DarkOliveGreen2': '#BCEE68',
'DarkOliveGreen3': '#A2CD5A',
'DarkOliveGreen4': '#6E8B3D',
'DarkOrange': '#FF8C00',
'DarkOrange1': '#FF7F00',
'DarkOrange2': '#EE7600',
'DarkOrange3': '#CD6600',
'DarkOrange4': '#8B4500',
'DarkOrchid': '#9932CC',
'DarkOrchid1': '#BF3EFF',
'DarkOrchid2': '#B23AEE',
'DarkOrchid3': '#9A32CD',
'DarkOrchid4': '#68228B',
'DarkRed': '#8B0000',
'DarkSalmon': '#E9967A',
'DarkSeaGreen': '#8FBC8F',
'DarkSeaGreen1': '#C1FFC1',
'DarkSeaGreen2': '#B4EEB4',
'DarkSeaGreen3': '#9BCD9B',
'DarkSeaGreen4': '#698B69',
'DarkSlateBlue': '#483D8B',
'DarkSlateGray': '#2F4F4F',
'DarkSlateGray1': '#97FFFF',
'DarkSlateGray2': '#8DEEEE',
'DarkSlateGray3': '#79CDCD',
'DarkSlateGray4': '#528B8B',
'DarkSlateGrey': '#2F4F4F',
'DarkTurquoise': '#00CED1',
'DarkViolet': '#9400D3',
'deep pink': '#FF1493',
'deep sky blue': '#00BFFF',
'DeepPink': '#FF1493',
'DeepPink1': '#FF1493',
'DeepPink2': '#EE1289',
'DeepPink3': '#CD1076',
'DeepPink4': '#8B0A50',
'DeepSkyBlue': '#00BFFF',
'DeepSkyBlue1': '#00BFFF',
'DeepSkyBlue2': '#00B2EE',
'DeepSkyBlue3': '#009ACD',
'DeepSkyBlue4': '#00688B',
'dim gray': '#696969',
'dim grey': '#696969',
'DimGray': '#696969',
'DimGrey': '#696969',
'dodger blue': '#1E90FF',
'DodgerBlue': '#1E90FF',
'DodgerBlue1': '#1E90FF',
'DodgerBlue2': '#1C86EE',
'DodgerBlue3': '#1874CD',
'DodgerBlue4': '#104E8B',
'firebrick': '#B22222',
'firebrick1': '#FF3030',
'firebrick2': '#EE2C2C',
'firebrick3': '#CD2626',
'firebrick4': '#8B1A1A',
'floral white': '#FFFAF0',
'FloralWhite': '#FFFAF0',
'forest green': '#228B22',
'ForestGreen': '#228B22',
'gainsboro': '#DCDCDC',
'ghost white': '#F8F8FF',
'GhostWhite': '#F8F8FF',
'gold': '#FFD700',
'gold1': '#FFD700',
'gold2': '#EEC900',
'gold3': '#CDAD00',
'gold4': '#8B7500',
'goldenrod': '#DAA520',
'goldenrod1': '#FFC125',
'goldenrod2': '#EEB422',
'goldenrod3': '#CD9B1D',
'goldenrod4': '#8B6914',
'gray': '#BEBEBE',
'gray0': '#000000',
'gray1': '#030303',
'gray2': '#050505',
'gray3': '#080808',
'gray4': '#0A0A0A',
'gray5': '#0D0D0D',
'gray6': '#0F0F0F',
'gray7': '#121212',
'gray8': '#141414',
'gray9': '#171717',
'gray10': '#1A1A1A',
'gray11': '#1C1C1C',
'gray12': '#1F1F1F',
'gray13': '#212121',
'gray14': '#242424',
'gray15': '#262626',
'gray16': '#292929',
'gray17': '#2B2B2B',
'gray18': '#2E2E2E',
'gray19': '#303030',
'gray20': '#333333',
'gray21': '#363636',
'gray22': '#383838',
'gray23': '#3B3B3B',
'gray24': '#3D3D3D',
'gray25': '#404040',
'gray26': '#424242',
'gray27': '#454545',
'gray28': '#474747',
'gray29': '#4A4A4A',
'gray30': '#4D4D4D',
'gray31': '#4F4F4F',
'gray32': '#525252',
'gray33': '#545454',
'gray34': '#575757',
'gray35': '#595959',
'gray36': '#5C5C5C',
'gray37': '#5E5E5E',
'gray38': '#616161',
'gray39': '#636363',
'gray40': '#666666',
'gray41': '#696969',
'gray42': '#6B6B6B',
'gray43': '#6E6E6E',
'gray44': '#707070',
'gray45': '#737373',
'gray46': '#757575',
'gray47': '#787878',
'gray48': '#7A7A7A',
'gray49': '#7D7D7D',
'gray50': '#7F7F7F',
'gray51': '#828282',
'gray52': '#858585',
'gray53': '#878787',
'gray54': '#8A8A8A',
'gray55': '#8C8C8C',
'gray56': '#8F8F8F',
'gray57': '#919191',
'gray58': '#949494',
'gray59': '#969696',
'gray60': '#999999',
'gray61': '#9C9C9C',
'gray62': '#9E9E9E',
'gray63': '#A1A1A1',
'gray64': '#A3A3A3',
'gray65': '#A6A6A6',
'gray66': '#A8A8A8',
'gray67': '#ABABAB',
'gray68': '#ADADAD',
'gray69': '#B0B0B0',
'gray70': '#B3B3B3',
'gray71': '#B5B5B5',
'gray72': '#B8B8B8',
'gray73': '#BABABA',
'gray74': '#BDBDBD',
'gray75': '#BFBFBF',
'gray76': '#C2C2C2',
'gray77': '#C4C4C4',
'gray78': '#C7C7C7',
'gray79': '#C9C9C9',
'gray80': '#CCCCCC',
'gray81': '#CFCFCF',
'gray82': '#D1D1D1',
'gray83': '#D4D4D4',
'gray84': '#D6D6D6',
'gray85': '#D9D9D9',
'gray86': '#DBDBDB',
'gray87': '#DEDEDE',
'gray88': '#E0E0E0',
'gray89': '#E3E3E3',
'gray90': '#E5E5E5',
'gray91': '#E8E8E8',
'gray92': '#EBEBEB',
'gray93': '#EDEDED',
'gray94': '#F0F0F0',
'gray95': '#F2F2F2',
'gray96': '#F5F5F5',
'gray97': '#F7F7F7',
'gray98': '#FAFAFA',
'gray99': '#FCFCFC',
'gray100': '#FFFFFF',
'green': '#00FF00',
'green yellow': '#ADFF2F',
'green1': '#00FF00',
'green2': '#00EE00',
'green3': '#00CD00',
'green4': '#008B00',
'GreenYellow': '#ADFF2F',
'grey': '#BEBEBE',
'grey0': '#000000',
'grey1': '#030303',
'grey2': '#050505',
'grey3': '#080808',
'grey4': '#0A0A0A',
'grey5': '#0D0D0D',
'grey6': '#0F0F0F',
'grey7': '#121212',
'grey8': '#141414',
'grey9': '#171717',
'grey10': '#1A1A1A',
'grey11': '#1C1C1C',
'grey12': '#1F1F1F',
'grey13': '#212121',
'grey14': '#242424',
'grey15': '#262626',
'grey16': '#292929',
'grey17': '#2B2B2B',
'grey18': '#2E2E2E',
'grey19': '#303030',
'grey20': '#333333',
'grey21': '#363636',
'grey22': '#383838',
'grey23': '#3B3B3B',
'grey24': '#3D3D3D',
'grey25': '#404040',
'grey26': '#424242',
'grey27': '#454545',
'grey28': '#474747',
'grey29': '#4A4A4A',
'grey30': '#4D4D4D',
'grey31': '#4F4F4F',
'grey32': '#525252',
'grey33': '#545454',
'grey34': '#575757',
'grey35': '#595959',
'grey36': '#5C5C5C',
'grey37': '#5E5E5E',
'grey38': '#616161',
'grey39': '#636363',
'grey40': '#666666',
'grey41': '#696969',
'grey42': '#6B6B6B',
'grey43': '#6E6E6E',
'grey44': '#707070',
'grey45': '#737373',
'grey46': '#757575',
'grey47': '#787878',
'grey48': '#7A7A7A',
'grey49': '#7D7D7D',
'grey50': '#7F7F7F',
'grey51': '#828282',
'grey52': '#858585',
'grey53': '#878787',
'grey54': '#8A8A8A',
'grey55': '#8C8C8C',
'grey56': '#8F8F8F',
'grey57': '#919191',
'grey58': '#949494',
'grey59': '#969696',
'grey60': '#999999',
'grey61': '#9C9C9C',
'grey62': '#9E9E9E',
'grey63': '#A1A1A1',
'grey64': '#A3A3A3',
'grey65': '#A6A6A6',
'grey66': '#A8A8A8',
'grey67': '#ABABAB',
'grey68': '#ADADAD',
'grey69': '#B0B0B0',
'grey70': '#B3B3B3',
'grey71': '#B5B5B5',
'grey72': '#B8B8B8',
'grey73': '#BABABA',
'grey74': '#BDBDBD',
'grey75': '#BFBFBF',
'grey76': '#C2C2C2',
'grey77': '#C4C4C4',
'grey78': '#C7C7C7',
'grey79': '#C9C9C9',
'grey80': '#CCCCCC',
'grey81': '#CFCFCF',
'grey82': '#D1D1D1',
'grey83': '#D4D4D4',
'grey84': '#D6D6D6',
'grey85': '#D9D9D9',
'grey86': '#DBDBDB',
'grey87': '#DEDEDE',
'grey88': '#E0E0E0',
'grey89': '#E3E3E3',
'grey90': '#E5E5E5',
'grey91': '#E8E8E8',
'grey92': '#EBEBEB',
'grey93': '#EDEDED',
'grey94': '#F0F0F0',
'grey95': '#F2F2F2',
'grey96': '#F5F5F5',
'grey97': '#F7F7F7',
'grey98': '#FAFAFA',
'grey99': '#FCFCFC',
'grey100': '#FFFFFF',
'honeydew': '#F0FFF0',
'honeydew1': '#F0FFF0',
'honeydew2': '#E0EEE0',
'honeydew3': '#C1CDC1',
'honeydew4': '#838B83',
'hot pink': '#FF69B4',
'HotPink': '#FF69B4',
'HotPink1': '#FF6EB4',
'HotPink2': '#EE6AA7',
'HotPink3': '#CD6090',
'HotPink4': '#8B3A62',
'indian red': '#CD5C5C',
'IndianRed': '#CD5C5C',
'IndianRed1': '#FF6A6A',
'IndianRed2': '#EE6363',
'IndianRed3': '#CD5555',
'IndianRed4': '#8B3A3A',
'ivory': '#FFFFF0',
'ivory1': '#FFFFF0',
'ivory2': '#EEEEE0',
'ivory3': '#CDCDC1',
'ivory4': '#8B8B83',
'khaki': '#F0E68C',
'khaki1': '#FFF68F',
'khaki2': '#EEE685',
'khaki3': '#CDC673',
'khaki4': '#8B864E',
'lavender': '#E6E6FA',
'lavender blush': '#FFF0F5',
'LavenderBlush': '#FFF0F5',
'LavenderBlush1': '#FFF0F5',
'LavenderBlush2': '#EEE0E5',
'LavenderBlush3': '#CDC1C5',
'LavenderBlush4': '#8B8386',
'lawn green': '#7CFC00',
'LawnGreen': '#7CFC00',
'lemon chiffon': '#FFFACD',
'LemonChiffon': '#FFFACD',
'LemonChiffon1': '#FFFACD',
'LemonChiffon2': '#EEE9BF',
'LemonChiffon3': '#CDC9A5',
'LemonChiffon4': '#8B8970',
'light blue': '#ADD8E6',
'light coral': '#F08080',
'light cyan': '#E0FFFF',
'light goldenrod': '#EEDD82',
'light goldenrod yellow': '#FAFAD2',
'light gray': '#D3D3D3',
'light green': '#90EE90',
'light grey': '#D3D3D3',
'light pink': '#FFB6C1',
'light salmon': '#FFA07A',
'light sea green': '#20B2AA',
'light sky blue': '#87CEFA',
'light slate blue': '#8470FF',
'light slate gray': '#778899',
'light slate grey': '#778899',
'light steel blue': '#B0C4DE',
'light yellow': '#FFFFE0',
'LightBlue': '#ADD8E6',
'LightBlue1': '#BFEFFF',
'LightBlue2': '#B2DFEE',
'LightBlue3': '#9AC0CD',
'LightBlue4': '#68838B',
'LightCoral': '#F08080',
'LightCyan': '#E0FFFF',
'LightCyan1': '#E0FFFF',
'LightCyan2': '#D1EEEE',
'LightCyan3': '#B4CDCD',
'LightCyan4': '#7A8B8B',
'LightGoldenrod': '#EEDD82',
'LightGoldenrod1': '#FFEC8B',
'LightGoldenrod2': '#EEDC82',
'LightGoldenrod3': '#CDBE70',
'LightGoldenrod4': '#8B814C',
'LightGoldenrodYellow': '#FAFAD2',
'LightGray': '#D3D3D3',
'LightGreen': '#90EE90',
'LightGrey': '#D3D3D3',
'LightPink': '#FFB6C1',
'LightPink1': '#FFAEB9',
'LightPink2': '#EEA2AD',
'LightPink3': '#CD8C95',
'LightPink4': '#8B5F65',
'LightSalmon': '#FFA07A',
'LightSalmon1': '#FFA07A',
'LightSalmon2': '#EE9572',
'LightSalmon3': '#CD8162',
'LightSalmon4': '#8B5742',
'LightSeaGreen': '#20B2AA',
'LightSkyBlue': '#87CEFA',
'LightSkyBlue1': '#B0E2FF',
'LightSkyBlue2': '#A4D3EE',
'LightSkyBlue3': '#8DB6CD',
'LightSkyBlue4': '#607B8B',
'LightSlateBlue': '#8470FF',
'LightSlateGray': '#778899',
'LightSlateGrey': '#778899',
'LightSteelBlue': '#B0C4DE',
'LightSteelBlue1': '#CAE1FF',
'LightSteelBlue2': '#BCD2EE',
'LightSteelBlue3': '#A2B5CD',
'LightSteelBlue4': '#6E7B8B',
'LightYellow': '#FFFFE0',
'LightYellow1': '#FFFFE0',
'LightYellow2': '#EEEED1',
'LightYellow3': '#CDCDB4',
'LightYellow4': '#8B8B7A',
'lime green': '#32CD32',
'LimeGreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'magenta1': '#FF00FF',
'magenta2': '#EE00EE',
'magenta3': '#CD00CD',
'magenta4': '#8B008B',
'maroon': '#B03060',
'maroon1': '#FF34B3',
'maroon2': '#EE30A7',
'maroon3': '#CD2990',
'maroon4': '#8B1C62',
'medium aquamarine': '#66CDAA',
'medium blue': '#0000CD',
'medium orchid': '#BA55D3',
'medium purple': '#9370DB',
'medium sea green': '#3CB371',
'medium slate blue': '#7B68EE',
'medium spring green': '#00FA9A',
'medium turquoise': '#48D1CC',
'medium violet red': '#C71585',
'MediumAquamarine': '#66CDAA',
'MediumBlue': '#0000CD',
'MediumOrchid': '#BA55D3',
'MediumOrchid1': '#E066FF',
'MediumOrchid2': '#D15FEE',
'MediumOrchid3': '#B452CD',
'MediumOrchid4': '#7A378B',
'MediumPurple': '#9370DB',
'MediumPurple1': '#AB82FF',
'MediumPurple2': '#9F79EE',
'MediumPurple3': '#8968CD',
'MediumPurple4': '#5D478B',
'MediumSeaGreen': '#3CB371',
'MediumSlateBlue': '#7B68EE',
'MediumSpringGreen': '#00FA9A',
'MediumTurquoise': '#48D1CC',
'MediumVioletRed': '#C71585',
'midnight blue': '#191970',
'MidnightBlue': '#191970',
'mint cream': '#F5FFFA',
'MintCream': '#F5FFFA',
'misty rose': '#FFE4E1',
'MistyRose': '#FFE4E1',
'MistyRose1': '#FFE4E1',
'MistyRose2': '#EED5D2',
'MistyRose3': '#CDB7B5',
'MistyRose4': '#8B7D7B',
'moccasin': '#FFE4B5',
'navajo white': '#FFDEAD',
'NavajoWhite': '#FFDEAD',
'NavajoWhite1': '#FFDEAD',
'NavajoWhite2': '#EECFA1',
'NavajoWhite3': '#CDB38B',
'NavajoWhite4': '#8B795E',
'navy': '#000080',
'navy blue': '#000080',
'NavyBlue': '#000080',
'old lace': '#FDF5E6',
'OldLace': '#FDF5E6',
'olive drab': '#6B8E23',
'OliveDrab': '#6B8E23',
'OliveDrab1': '#C0FF3E',
'OliveDrab2': '#B3EE3A',
'OliveDrab3': '#9ACD32',
'OliveDrab4': '#698B22',
'orange': '#FFA500',
'orange red': '#FF4500',
'orange1': '#FFA500',
'orange2': '#EE9A00',
'orange3': '#CD8500',
'orange4': '#8B5A00',
'OrangeRed': '#FF4500',
'OrangeRed1': '#FF4500',
'OrangeRed2': '#EE4000',
'OrangeRed3': '#CD3700',
'OrangeRed4': '#8B2500',
'orchid': '#DA70D6',
'orchid1': '#FF83FA',
'orchid2': '#EE7AE9',
'orchid3': '#CD69C9',
'orchid4': '#8B4789',
'pale goldenrod': '#EEE8AA',
'pale green': '#98FB98',
'pale turquoise': '#AFEEEE',
'pale violet red': '#DB7093',
'PaleGoldenrod': '#EEE8AA',
'PaleGreen': '#98FB98',
'PaleGreen1': '#9AFF9A',
'PaleGreen2': '#90EE90',
'PaleGreen3': '#7CCD7C',
'PaleGreen4': '#548B54',
'PaleTurquoise': '#AFEEEE',
'PaleTurquoise1': '#BBFFFF',
'PaleTurquoise2': '#AEEEEE',
'PaleTurquoise3': '#96CDCD',
'PaleTurquoise4': '#668B8B',
'PaleVioletRed': '#DB7093',
'PaleVioletRed1': '#FF82AB',
'PaleVioletRed2': '#EE799F',
'PaleVioletRed3': '#CD687F',
'PaleVioletRed4': '#8B475D',
'papaya whip': '#FFEFD5',
'PapayaWhip': '#FFEFD5',
'peach puff': '#FFDAB9',
'PeachPuff': '#FFDAB9',
'PeachPuff1': '#FFDAB9',
'PeachPuff2': '#EECBAD',
'PeachPuff3': '#CDAF95',
'PeachPuff4': '#8B7765',
'peru': '#CD853F',
'pink': '#FFC0CB',
'pink1': '#FFB5C5',
'pink2': '#EEA9B8',
'pink3': '#CD919E',
'pink4': '#8B636C',
'plum': '#DDA0DD',
'plum1': '#FFBBFF',
'plum2': '#EEAEEE',
'plum3': '#CD96CD',
'plum4': '#8B668B',
'powder blue': '#B0E0E6',
'PowderBlue': '#B0E0E6',
'purple': '#A020F0',
'purple1': '#9B30FF',
'purple2': '#912CEE',
'purple3': '#7D26CD',
'purple4': '#551A8B',
'red': '#FF0000',
'red1': '#FF0000',
'red2': '#EE0000',
'red3': '#CD0000',
'red4': '#8B0000',
'rosy brown': '#BC8F8F',
'RosyBrown': '#BC8F8F',
'RosyBrown1': '#FFC1C1',
'RosyBrown2': '#EEB4B4',
'RosyBrown3': '#CD9B9B',
'RosyBrown4': '#8B6969',
'royal blue': '#4169E1',
'RoyalBlue': '#4169E1',
'RoyalBlue1': '#4876FF',
'RoyalBlue2': '#436EEE',
'RoyalBlue3': '#3A5FCD',
'RoyalBlue4': '#27408B',
'saddle brown': '#8B4513',
'SaddleBrown': '#8B4513',
'salmon': '#FA8072',
'salmon1': '#FF8C69',
'salmon2': '#EE8262',
'salmon3': '#CD7054',
'salmon4': '#8B4C39',
'sandy brown': '#F4A460',
'SandyBrown': '#F4A460',
'sea green': '#2E8B57',
'SeaGreen': '#2E8B57',
'SeaGreen1': '#54FF9F',
'SeaGreen2': '#4EEE94',
'SeaGreen3': '#43CD80',
'SeaGreen4': '#2E8B57',
'seashell': '#FFF5EE',
'seashell1': '#FFF5EE',
'seashell2': '#EEE5DE',
'seashell3': '#CDC5BF',
'seashell4': '#8B8682',
'sienna': '#A0522D',
'sienna1': '#FF8247',
'sienna2': '#EE7942',
'sienna3': '#CD6839',
'sienna4': '#8B4726',
'sky blue': '#87CEEB',
'SkyBlue': '#87CEEB',
'SkyBlue1': '#87CEFF',
'SkyBlue2': '#7EC0EE',
'SkyBlue3': '#6CA6CD',
'SkyBlue4': '#4A708B',
'slate blue': '#6A5ACD',
'slate gray': '#708090',
'slate grey': '#708090',
'SlateBlue': '#6A5ACD',
'SlateBlue1': '#836FFF',
'SlateBlue2': '#7A67EE',
'SlateBlue3': '#6959CD',
'SlateBlue4': '#473C8B',
'SlateGray': '#708090',
'SlateGray1': '#C6E2FF',
'SlateGray2': '#B9D3EE',
'SlateGray3': '#9FB6CD',
'SlateGray4': '#6C7B8B',
'SlateGrey': '#708090',
'snow': '#FFFAFA',
'snow1': '#FFFAFA',
'snow2': '#EEE9E9',
'snow3': '#CDC9C9',
'snow4': '#8B8989',
'spring green': '#00FF7F',
'SpringGreen': '#00FF7F',
'SpringGreen1': '#00FF7F',
'SpringGreen2': '#00EE76',
'SpringGreen3': '#00CD66',
'SpringGreen4': '#008B45',
'steel blue': '#4682B4',
'SteelBlue': '#4682B4',
'SteelBlue1': '#63B8FF',
'SteelBlue2': '#5CACEE',
'SteelBlue3': '#4F94CD',
'SteelBlue4': '#36648B',
'tan': '#D2B48C',
'tan1': '#FFA54F',
'tan2': '#EE9A49',
'tan3': '#CD853F',
'tan4': '#8B5A2B',
'thistle': '#D8BFD8',
'thistle1': '#FFE1FF',
'thistle2': '#EED2EE',
'thistle3': '#CDB5CD',
'thistle4': '#8B7B8B',
'tomato': '#FF6347',
'tomato1': '#FF6347',
'tomato2': '#EE5C42',
'tomato3': '#CD4F39',
'tomato4': '#8B3626',
'turquoise': '#40E0D0',
'turquoise1': '#00F5FF',
'turquoise2': '#00E5EE',
'turquoise3': '#00C5CD',
'turquoise4': '#00868B',
'violet': '#EE82EE',
'violet red': '#D02090',
'VioletRed': '#D02090',
'VioletRed1': '#FF3E96',
'VioletRed2': '#EE3A8C',
'VioletRed3': '#CD3278',
'VioletRed4': '#8B2252',
'wheat': '#F5DEB3',
'wheat1': '#FFE7BA',
'wheat2': '#EED8AE',
'wheat3': '#CDBA96',
'wheat4': '#8B7E66',
'white': '#FFFFFF',
'white smoke': '#F5F5F5',
'WhiteSmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellow green': '#9ACD32',
'yellow1': '#FFFF00',
'yellow2': '#EEEE00',
'yellow3': '#CDCD00',
'yellow4': '#8B8B00',
'YellowGreen': '#9ACD32',
}
```
#### File: JeffreyTsang/Brickbreaker/geom.py
```python
import numpy
import math
import copy
class Tuple2(object):
"""An instance is a tuple in 2D space.
This serves as the base class for both Point2 and Vector2."""
# MUTABLE ATTRIBUTES
@property
def x(self):
"""The x coordinate
**Invariant**: Value must be a float. If assigned an int, it will be typecast
to a float (possibly raising a TypeError)."""
return self._x
@x.setter
def x(self, value):
self._x = float(value)
@x.deleter
def x(self):
del self._x
@property
def y(self):
"""The y coordinate
**Invariant**: Value must be a float. If assigned an int, it will be typecast
to a float (possibly raising a TypeError)."""
return self._y
@y.setter
def y(self, value):
self._y = float(value)
@y.deleter
def y(self):
del self._y
# BUILT-IN METHODS
def __init__(self, x=0, y=0):
"""**Constructor**: creates a new Tuple2 value (x,y).
:param x: initial x value
**Precondition**: value is an int or float.
:param y: initial y value
**Precondition**: value is an int or float.
All values are 0.0 by default.
"""
self.x = x
self.y = y
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent Tuple2s.
This method uses numpy to test whether the coordinates are
"close enough". It does not require exact equality for floats.
:param other: value to compare against
"""
return (type(other) == Tuple2 and numpy.allclose(self.list(),other.list()))
def __ne__(self, other):
"""**Returns**: True if self and other are not equivalent Tuple2s.
:param other: value to compare against
"""
return not self == other
def __str__(self):
"""**Returns**: Readable String representation of this Tuple2. """
return "("+str(self.x)+","+str(self.y)+")"
def __repr__(self):
"""**Returns**: Unambiguous String representation of this Tuple2. """
return "%s%s" % (self.__class__,self.__str__())
def __add__(self, other):
"""**Returns**: the sum of self and other.
The value returned has the same type as self (so it is either a Tuple2
or a subclass of Tuple2). The contents of this object are not altered.
:param other: tuple value to add
**Precondition**: value has the same type as self.
"""
assert (type(other) == type(self)), "value %(value)s is not of type %(type)s" % {'value': `other`, 'type':`type(self)`}
result = copy.copy(self)
result.x += other.x
result.y += other.y
return result
def __mul__(self, scalar):
"""**Returns**: the scalar multiple of self and other.
The value returned is a new Tuple2. The contents of this Tuple2
are not altered.
:param scalar: scalar to multiply by
**Precondition**: value is an int or float.
"""
assert (type(scalar) in [int,float]), "value %s is not a number" % `scalar`
result = copy.copy(self)
result.x *= scalar
result.y *= scalar
return result
def __rmul__(self, scalar):
"""**Returns**: the scalar multiple of self and other.
The value returned is a new Tuple2. The contents of this Tuple2
are not altered.
:param scalar: scalar to multiply by
**Precondition**: value is an int or float.
"""
assert (type(scalar) in [int,float]), "value %s is not a number" % `scalar`
result = copy.copy(self)
result.x *= scalar
result.y *= scalar
return result
# PUBLIC METHODS
def copy(self):
"""**Returns**: A copy of this Tuple2"""
return Tuple2(self.x, self.y)
def list(self):
"""**Returns**: A python list with the contents of this Tuple2."""
return [self.x,self.y]
def abs(self):
"""Sets each component of this Tuple2 to its absolute value."""
self.x = abs(self.x)
self.y = abs(self.y)
def clamp(self,low,high):
"""Clamps this tuple to the range [low, high].
Any value in this Vector less than low is set to low. Any
value greater than high is set to high."""
self.x = max(low,min(high,self.x))
self.y = max(low,min(high,self.y))
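# Worked example of clamp (illustrative, not in the original source):
#   t = Tuple2(-1.0, 5.0); t.clamp(0.0, 1.0)   # t is now (0.0,1.0)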
def interpolate(self, other, alpha):
"""**Returns**: the interpolation of self and other via alpha.
The value returned has the same type as self (so it is either
a Tuple2 or is a subclass of Tuple2). The contents of this object
are not altered. The resulting value is
alpha*self+(1-alpha)*other
according to Tuple2 addition and scalar multiplication.
:param other: tuple value to interpolate with
**Precondition**: value has the same type as self.
:param alpha: scalar to interpolate by
**Precondition**: value is an int or float.
"""
assert (type(other) == type(self)), "value %(value)s is not of type %(type)s" % {'value': `other`, 'type':`type(self)`}
assert (type(alpha) in [int,float]), "value %s is not a number" % `alpha`
return alpha*self+(1-alpha)*other
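# Worked example of interpolate (illustrative, not in the original source):
#   a = Tuple2(0.0, 10.0); b = Tuple2(10.0, 0.0)
#   a.interpolate(b, 0.25)   # 0.25*a + 0.75*b = (7.5,2.5)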
class Point2(Tuple2):
"""An instance is a point in 2D space.
This class is a subclass of Tuple2 and inherits all of its attributes and methods.
"""
# BUILT_IN METHODS
def __init__(self, x=0, y=0):
"""**Constructor**: creates a new Point value (x,y).
:param x: initial x value
**Precondition**: value is an int or float.
:param y: initial y value
**Precondition**: value is an int or float.
All values are 0.0 by default.
"""
Tuple2.__init__(self,x,y)
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent Points.
This method uses numpy to test whether the coordinates are
"close enough". It does not require exact equality for floats.
:param other: value to compare against
"""
return (type(other) == Point2 and numpy.allclose(self.list(),other.list()))
def __sub__(self, tail):
"""**Returns**: the Vector from tail to self.
The value returned is a Vector2 with this point at its head.
:param tail: the tail value for the new Vector
**Precondition**: value is a Point2 object.
"""
assert (isinstance(tail, Point2)), "value %s is not a Point2" % `tail`
return Vector2(self.x-tail.x,self.y-tail.y)
# PUBLIC METHODS
def copy(self):
"""**Returns**: A copy of this Point2"""
return Point2(self.x, self.y)
def distanceTo(self, other):
"""**Returns**: the Euclidean distance from this point to other
:param other: value to compare against
**Precondition**: value is a Tuple2 object.
"""
assert (isinstance(other, Tuple2)), "value %s is not a Tuple2" % `other`
return math.sqrt((self.x-other.x)*(self.x-other.x)+
(self.y-other.y)*(self.y-other.y))
class Vector2(Tuple2):
"""An instance is a Vector in 2D space.
This class is a subclass of Tuple2 and inherits all of its attributes and methods.
"""
# BUILT-IN METHODS
def __init__(self, x=0, y=0):
"""**Constructor**: creates a new Vector object (x,y,z).
:param x: initial x value
**Precondition**: value is an int or float.
:param y: initial y value
**Precondition**: value is an int or float.
:param z: initial z value
**Precondition**: value is an int or float.
All values are 0.0 by default.
"""
Tuple2.__init__(self,x,y)
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent Vectors.
This method uses numpy to test whether the coordinates are
"close enough". It does not require exact equality for floats.
:param other: value to compare against
"""
return (type(other) == Vector2 and numpy.allclose(self.list(),other.list()))
def __str__(self):
"""**Returns**: A readable String representation of this Vector. """
return "<"+str(self.x)+","+str(self.y)+">"
def __sub__(self, other):
"""**Returns**: the difference between this Vector and other.
The value returned is a new Vector. The contents of this vector are not
modified.
:param other: the Vector to subtract
**Precondition**: value is a Vector2 object.
"""
assert (isinstance(other, Vector2)), "value %s is not a Vector2" % `other`
return Vector2(self.x-other.x,self.y-other.y)
# PUBLIC METHODS
def copy(self):
"""**Returns**: A copy of this Vector2"""
return Vector2(self.x, self.y)
def length(self):
"""**Returns**: the length of this Vector."""
return math.sqrt(self.x*self.x+self.y*self.y)
def length2(self):
"""**Returns**: the square of the length of this Vector."""
return self.x*self.x+self.y*self.y
def angle(self,other):
"""**Returns**: the angle between this vector and other.
The answer provided is in radians. Neither this Vector nor
other may be the zero vector.
:param other: value to compare against
**Precondition**: value is a nonzero Vector2 object.
"""
assert (isinstance(other, Vector2)), "value %s is not a Vector2" % `other`
na = self.length()
nb = other.length()
assert (na != 0), "Vector %s is zero" % `self`
assert (nb != 0), "Vector %s is zero" % `other`
return math.acos(self.dot(other)/(na*nb))
def perp(self):
"""**Returns**: a 2D vector perpendicular to this one.
The result of this method is a new Vector2"""
return Vector2(self.y, -self.x)
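# Example (illustrative): Vector2(3.0, 4.0).perp() is <4.0,-3.0>; its dot
# product with the original vector is 0, confirming perpendicularity.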
def dot(self,other):
"""**Returns**: the dot product between self and other.
The result of this method is a float.
:param other: value to dot
**Precondition**: value is a Vector2 object.
"""
assert (isinstance(other, Vector2)), "value %s is not a Vector" % `other`
return (self.x*other.x+self.y*other.y)
def projection(self,other):
"""**Returns**: the projection of this vector on to other.
The result of this method is a new Vector2
:param other: value to project on to
**Precondition**: value is a Vector2 object.
"""
assert (isinstance(other, Vector2)), "value %s is not a Vector2" % `other`
dot = self.dot(other)
base = other.length2()
return (dot/base)*other
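# Worked example of projection (illustrative, not in the original source):
#   v = Vector2(3.0, 4.0); w = Vector2(1.0, 0.0)
#   v.projection(w)   # (v.dot(w)/w.length2())*w = <3.0,0.0>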
def normalize(self):
"""Normalizes this Vector in place.
This method alters the Vector so that it has the same direction,
but its length is now 1.
"""
length = self.length()
self.x /= length
self.y /= length
class Tuple3(object):
"""An instance is a tuple in 3D space.
This serves as the base class for both Point3 and Vector3."""
# MUTABLE ATTRIBUTES
@property
def x(self):
"""The x coordinate
**Invariant**: Value must be a float. If assigned an int, it will be typecast
to a float (possibly raising a TypeError)."""
return self._x
@x.setter
def x(self, value):
self._x = float(value)
@x.deleter
def x(self):
del self._x
@property
def y(self):
"""The y coordinate
**Invariant**: Value must be a float. If assigned an int, it will be typecast
to a float (possibly raising a TypeError)."""
return self._y
@y.setter
def y(self, value):
self._y = float(value)
@y.deleter
def y(self):
del self._y
@property
def z(self):
"""The z coordinate
**Invariant**: Value must be a float. If assigned an int, it will be typecast
to a float (possibly raising a TypeError)."""
return self._z
@z.setter
def z(self, value):
self._z = float(value)
@z.deleter
def z(self):
del self._z
# BUILT-IN METHODS
def __init__(self, x=0, y=0, z=0):
"""**Constructor**: creates a new Tuple3 value (x,y,z).
:param x: initial x value
**Precondition**: value is an int or float.
:param y: initial y value
**Precondition**: value is an int or float.
:param z: initial z value
**Precondition**: value is an int or float.
All values are 0.0 by default.
"""
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent Tuple3s.
This method uses numpy to test whether the coordinates are
"close enough". It does not require exact equality for floats.
:param other: value to compare against
"""
return (type(other) == Tuple3 and numpy.allclose(self.list(),other.list()))
def __ne__(self, other):
"""**Returns**: True if self and other are not equivalent Tuple3s.
:param other: value to compare against
"""
return not self == other
def __str__(self):
"""**Returns**: Readable String representation of this Tuple3. """
return "("+str(self.x)+","+str(self.y)+","+str(self.z)+")"
def __repr__(self):
"""**Returns**: Unambiguous String representation of this Tuple3. """
return "%s%s" % (self.__class__,self.__str__())
def __add__(self, other):
"""**Returns**: the sum of self and other.
The value returned has the same type as self (so it is either
a Tuple3 or is a subclass of Tuple3). The contents of this object
are not altered.
:param other: tuple value to add
**Precondition**: value has the same type as self.
"""
assert (type(other) == type(self)), "value %(value)s is not of type %(type)s" % {'value': `other`, 'type':`type(self)`}
result = copy.copy(self)
result.x += other.x
result.y += other.y
result.z += other.z
return result
def __mul__(self, scalar):
"""**Returns**: the scalar multiple of self and other.
The value returned is a new Tuple3. The contents of this Tuple3
are not altered.
:param scalar: scalar to multiply by
**Precondition**: value is an int or float.
"""
assert (type(scalar) in [int,float]), "value %s is not a number" % `scalar`
result = copy.copy(self)
result.x *= scalar
result.y *= scalar
result.z *= scalar
return result
def __rmul__(self, scalar):
"""**Returns**: the scalar multiple of self and other.
The value returned is a new Tuple3. The contents of this Tuple3
are not altered.
:param scalar: scalar to multiply by
**Precondition**: value is an int or float.
"""
assert (type(scalar) in [int,float]), "value %s is not a number" % `scalar`
result = copy.copy(self)
result.x *= scalar
result.y *= scalar
result.z *= scalar
return result
# PUBLIC METHODS
def copy(self):
"""**Returns**: A copy of this Tuple3"""
return Tuple3(self.x, self.y, self.z)
def list(self):
"""**Returns**: A python list with the contents of this Tuple3."""
return [self.x,self.y,self.z]
def abs(self):
"""Sets each component of this Tuple3 to its absolute value."""
self.x = abs(self.x)
self.y = abs(self.y)
self.z = abs(self.z)
def clamp(self,low,high):
"""Clamps this tuple to the range [low, high].
Any value in this Vector less than low is set to low. Any
value greater than high is set to high."""
self.x = max(low,min(high,self.x))
self.y = max(low,min(high,self.y))
self.z = max(low,min(high,self.z))
def interpolate(self, other, alpha):
"""**Returns**: the interpolation of self and other via alpha.
The value returned has the same type as self (so it is either
a Tuple3 or is a subclass of Tuple3). The contents of this object
are not altered. The resulting value is
alpha*self+(1-alpha)*other
according to Tuple3 addition and scalar multiplication.
:param other: tuple value to interpolate with
**Precondition**: value has the same type as self.
:param alpha: scalar to interpolate by
**Precondition**: value is an int or float.
"""
assert (type(other) == type(self)), "value %(value)s is not of type %(type)s" % {'value': `other`, 'type':`type(self)`}
assert (type(alpha) in [int,float]), "value %s is not a number" % `alpha`
return alpha*self+(1-alpha)*other
class Point3(Tuple3):
"""An instance is a point in 3 space.
This class is a subclass of Tuple3 and inherits all of its attributes and methods.
"""
# BUILT_IN METHODS
def __init__(self, x=0, y=0, z=0):
"""**Constructor**: creates a new Point value (x,y,z).
:param x: initial x value
**Precondition**: value is an int or float.
:param y: initial y value
**Precondition**: value is an int or float.
:param z: initial z value
**Precondition**: value is an int or float.
All values are 0.0 by default.
"""
Tuple3.__init__(self,x,y,z)
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent Points.
This method uses numpy to test whether the coordinates are
"close enough". It does not require exact equality for floats.
:param other: value to compare against
"""
return (type(other) == Point3 and numpy.allclose(self.list(),other.list()))
def __sub__(self, tail):
"""**Returns**: the Vector from tail to self.
The value returned is a Vector with this point at its head.
:param tail: the tail value for the new Vector
**Precondition**: value is a Point object.
"""
assert (isinstance(tail, Point3)), "value %s is not a Point" % `tail`
return Vector3(self.x-tail.x,self.y-tail.y,self.z-tail.z)
# PUBLIC METHODS
def copy(self):
"""**Returns**: A copy of this Point3"""
return Point3(self.x, self.y, self.z)
def distanceTo(self, other):
"""**Returns**: the Euclidean distance from this point to other
:param other: value to compare against
**Precondition**: value is a Tuple3 object.
"""
return math.sqrt((self.x-other.x)*(self.x-other.x)+
(self.y-other.y)*(self.y-other.y)+
(self.z-other.z)*(self.z-other.z))
class Vector3(Tuple3):
"""An instance is a Vector in 3 space.
This class is a subclass of Tuple3 and inherits all of its attributes and methods.
"""
# BUILT-IN METHODS
def __init__(self, x=0, y=0, z=0):
"""**Constructor**: creates a new Vector object (x,y,z).
:param x: initial x value
**Precondition**: value is an int or float.
:param y: initial y value
**Precondition**: value is an int or float.
:param z: initial z value
**Precondition**: value is an int or float.
All values are 0.0 by default.
"""
Tuple3.__init__(self,x,y,z)
def __eq__(self, other):
"""**Returns**: True if self and other are equivalent Vectors.
This method uses numpy to test whether the coordinates are
"close enough". It does not require exact equality for floats.
:param other: value to compare against
"""
return (type(other) == Vector3 and numpy.allclose(self.list(),other.list()))
def __str__(self):
"""**Returns**: A readable String representation of this Vector. """
return "<"+str(self.x)+","+str(self.y)+","+str(self.z)+">"
def __sub__(self, other):
"""**Returns**: the difference between this Vector and other.
The value returned is a new Vector. The contents of this vector are not
modified.
:param other: the Vector to subtract
**Precondition**: value is a Vector object.
"""
assert (isinstance(other, Vector3)), "value %s is not a Vector3" % `other`
return Vector3(self.x-other.x,self.y-other.y,self.z-other.z)
# PUBLIC METHODS
def copy(self):
"""**Returns**: A copy of this Vector3"""
return Vector3(self.x, self.y, self.z)
def length(self):
"""**Returns**: the length of this Vector."""
return math.sqrt(self.x*self.x+self.y*self.y+self.z*self.z)
def length2(self):
"""**Returns**: the square of the length of this Vector."""
return self.x*self.x+self.y*self.y+self.z*self.z
def angle(self,other):
"""**Returns**: the angle between this vector and other.
The answer provided is in radians. Neither this Vector nor
other may be the zero vector.
:param other: value to compare against
**Precondition**: value is a nonzero Vector3 object.
"""
assert (isinstance(other, Vector3)), "value %s is not a Vector3" % `other`
na = self.length()
nb = other.length()
assert (na != 0), "Vector %s is zero" % `self`
assert (nb != 0), "Vector %s is zero" % `other`
return math.acos(self.dot(other)/(na*nb))
def cross(self,other):
"""**Returns**: the cross product between self and other.
The result of this method is a new Vector3
:param other: value to cross
**Precondition**: value is a Vector3 object.
"""
assert (isinstance(other, Vector3)), "value %s is not a Vector" % `other`
return Vector3(self.y*other.z-self.z*other.y,
self.z*other.x-self.x*other.z,
self.x*other.y-self.y*other.x)
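# Worked example (illustrative): the unit axes satisfy
#   Vector3(1,0,0).cross(Vector3(0,1,0))   # <0.0,0.0,1.0>
# and the result is perpendicular to both inputs (both dot products are 0).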
def dot(self,other):
"""**Returns**: the dot product between self and other.
The result of this method is a float.
:param other: value to dot
**Precondition**: value is a Vector3 object.
"""
assert (isinstance(other, Vector3)), "value %s is not a Vector" % `other`
return (self.x*other.x+self.y*other.y+self.z*other.z)
def projection(self,other):
"""**Returns**: the projection of this vector on to other.
The result of this method is a new Vector3
:param other: value to project on to
**Precondition**: value is a Vector3 object.
"""
assert (type(other) == Vector3), "value %s is not a Vector3" % `other`
dot = self.dot(other)
base = other.length2()
return (dot/base)*other
def normalize(self):
"""Normalizes this Vector in place.
This method alters the Vector so that it has the same direction,
but its length is now 1.
"""
length = self.length()
self.x /= length
self.y /= length
self.z /= length
```
#### File: JeffreyTsang/Brickbreaker/models.py
```python
import random # To randomly generate the ball velocity
from constants import *
from game2d import *
# PRIMARY RULE: Models are not allowed to access anything except the module constants.py.
# If you need extra information from Play, then it should be a parameter in your method,
# and Play should pass it as a argument when it calls the method.
class Paddle(GRectangle):
"""An instance is the game paddle.
This class contains a method to detect collision with the ball, as well as move it
left and right. You may wish to add more features to this class.
The attributes of this class are those inherited from GRectangle.
LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
"""
# GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)
# INITIALIZER TO CREATE A NEW PADDLE
def __init__(self, x, bottom, width, height, color):
"""Creates a paddle of Parent class Grectangle
Initializer: Creates a GRectangle object as the paddle. Paddle is a
subclass of GRectangle with the given arguments.
Parameter x: The x-coordinate of the paddle
Precondition: x is a number (int or float)
Parameter bottom: the vertical coordinate of the bottom edge
of the paddle
Precondition: bottom is a number(int or float)
Parameter width: the paddle width
Precondition: width is a number(int or float)>=0
Parameter height: the paddle height
Precondition: height is a number(int or float)>=0
Parameter color: the paddle color
Precondition:color is an RGB object of class colormodel"""
GRectangle.__init__(self, x=x, bottom=bottom, width=width,
height=height, linecolor=color, fillcolor=color)
# METHODS TO MOVE THE PADDLE AND CHECK FOR COLLISIONS
def move(self,press):
"""Moves the paddle left and right in the bounds of the window
Sets left attribute to 0 and right attribute to width of game window
to create boundaries
Parameter press: the number of pixels added to the x coordinate of the
paddle in one key press
Precondition: press is a number(int or float)"""
self.x+=press
if self.left<0:
self.left=0
if self.right>GAME_WIDTH:
self.right=GAME_WIDTH
def collides(self,ball):
"""Returns: True if the ball collides with this brick
Parameter ball: The ball to check
Precondition: ball is of class Ball"""
if ball._vy<0:
return self.contains(ball.x-BALL_RADIUS, ball.y-BALL_RADIUS) or\
self.contains(ball.x-BALL_RADIUS, ball.y+BALL_RADIUS)or\
self.contains(ball.x+BALL_RADIUS, ball.y-BALL_RADIUS) or\
self.contains(ball.x+BALL_RADIUS, ball.y+BALL_RADIUS)
# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
class Brick(GRectangle):
"""An instance is the game paddle.
This class contains a method to detect collision with the ball. You may wish to
add more features to this class.
The attributes of this class are those inherited from GRectangle.
LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
"""
# GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)
# INITIALIZER TO CREATE A BRICK
def __init__(self, left,y, width, height, color):
"""Initializer: creates a GRectangle object as the brick. Brick is a
subclass of GRectangle with the given arguments.
Parameter left: The left edge of the brick
Precondition: left is a number (int or float)
Parameter y: the vertical coordinate of the brick
Precondition: y is a number(int or float)
Parameter width: the brick width
Precondition: width is a number(int or float)>=0
Parameter height: the brick height
Precondition: height is a number(int or float)>=0
Parameter color: the brick color
Precondition: color is an RGB object of class colormodel"""
GRectangle.__init__(self, left=left, y=y, width=width, height=height, \
linecolor=color, fillcolor=color)
# METHOD TO CHECK FOR COLLISION
def collides(self,ball):
"""Returns: True if the ball collides with this brick
Parameter ball: The ball to check
Precondition: ball is of class Ball"""
return self.contains(ball.x-BALL_RADIUS, ball.y-BALL_RADIUS) or\
self.contains(ball.x-BALL_RADIUS, ball.y+BALL_RADIUS)
# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
class Ball(GEllipse):
"""Instance is a game ball.
We extend GEllipse because a ball must have additional attributes for velocity.
This class adds these attributes and manages them.
INSTANCE ATTRIBUTES:
_vx [int or float]: Velocity in x direction
_vy [int or float]: Velocity in y direction
The class Play will need to look at these attributes, so you will need
getters for them. However, it is possible to write this assignment with no
setters for the velocities.
How? The only time the ball can change velocities is if it hits an obstacle
(paddle or brick) or if it hits a wall. Why not just write methods for these
instead of using setters? This cuts down on the amount of code in Gameplay.
NOTE: The ball does not have to be a GEllipse. It could be an instance
of GImage (why?). This change is allowed, but you must modify the class
header up above.
LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
"""
# GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)
def getVX(self):
"""Returns: velocity in x direction of ball"""
return self._vx
def getVY(self):
"""Returns: velocity in y direction of ball"""
return self._vy
def setVY(self, value):
"""Sets vy to value
Parameter value:value is a number(int or float)"""
assert (type(value)==int or type(value)==float)
self._vy=value
# INITIALIZER TO SET RANDOM VELOCITY
def __init__(self, x, y, width, height, color):
"""Initializer: creates a GRectangle object as the ball. Ball is a
subclass of GRectangle with the given arguments for the instance. The
initializer also sets the default values for attributes _vx and _vy.
Parameter x: The x coordinate of the paddle
Precondition: left is a number (int or float)
Parameter y: the y coordinate of the paddle
Precondition: bottom is a number(int or float)
Parameter width: the paddle width
Precondition: width is a number(int or float)>=0
Parameter height:the paddle height
Precondition: width is a number(int or float)>=0
Parameter color: the paddle color
Precondition:color is an RGB object of class colormodel"""
GEllipse.__init__(self, x=x, y=y, width=width, height=height,\
fillcolor=color)
self._vx = random.uniform(1.0,5.0)
self._vx = self._vx * random.choice([-1, 1])
self.setVY(-2.0)
# METHODS TO MOVE AND/OR BOUNCE THE BALL
def step(self):
"""Modifies the x and y attributes of the Ball instance to allow it
to move at random speeds"""
self.x=self.x+self._vx
self.y=self.y+self._vy
def bounce(self):
"""Modifies the _vy and _vx class attributes to be negative when the
ball object hits any of the four corners of the game window."""
if self.y>=GAME_HEIGHT:
self._vy=-self._vy
if self.x>=GAME_WIDTH:
self._vx=-self._vx
if self.x<=0:
self._vx=-self._vx
if self.y<=0:
self._vy=-self._vy
# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
def bottom(self):
"""Returns: True if the y coordinate of the ball passes through the
bottom of the screen; False otherwise
Allows the Ball object to pass through the bottom of the game
window if the player does not catch the ball with the paddle."""
if self.y<=0:
return True
else:
return False
# IF YOU NEED ADDITIONAL MODEL CLASSES, THEY GO HERE
```
|
{
"source": "jeffreyttc/opencv_ros2",
"score": 3
}
|
#### File: opencv_ros2/opencv_ros2/face_detection.py
```python
import rclpy # Python library for ROS 2
from rclpy.node import Node # Handles the creation of nodes
from sensor_msgs.msg import Image # Image is the message type
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import cv2 # OpenCV library
# Load the cascade
# face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
from rclpy.qos import qos_profile_sensor_data
class FaceDetection(Node):
"""
Create a FaceDetection class, which is a subclass of the Node class.
"""
def __init__(self):
"""
Class constructor to set up the node
"""
# Initiate the Node class's constructor and give it a name
super().__init__('face_detection')
# Create the subscriber. This subscriber will receive an Image
# from the /image_raw topic, using the sensor-data QoS profile.
self.subscription = self.create_subscription(
Image,
'/image_raw',
self.listener_callback,
qos_profile_sensor_data)
self.subscription # prevent unused variable warning
# Used to convert between ROS and OpenCV images
self.br = CvBridge()
def listener_callback(self, data):
"""
Callback function.
"""
# Display the message on the console
self.get_logger().info('Receiving image')
# Convert ROS Image message to OpenCV image
current_frame = self.br.imgmsg_to_cv2(data, "bgr8")
# Convert to grayscale
gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
# Detect the faces (scaleFactor=1.3, minNeighbors=5)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# Draw the rectangle around each face
for (x,y,w,h) in faces:
current_frame = cv2.rectangle(current_frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = current_frame[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# Display image
cv2.imshow("Camera", current_frame)
cv2.waitKey(1)
def main(args=None):
# Initialize the rclpy library
rclpy.init(args=args)
# Create the node
face_detection = FaceDetection()
# Spin the node so the callback function is called.
rclpy.spin(face_detection)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
face_detection.destroy_node()
# Shutdown the ROS client library for Python
rclpy.shutdown()
if __name__ == '__main__':
main()
```
#### File: opencv_ros2/opencv_ros2/webcam_pub.py
```python
import rclpy # Python Client Library for ROS 2
from rclpy.node import Node # Handles the creation of nodes
from sensor_msgs.msg import Image # Image is the message type
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import cv2 # OpenCV library
class ImagePublisher(Node):
"""
Create an ImagePublisher class, which is a subclass of the Node class.
"""
def __init__(self):
"""
Class constructor to set up the node
"""
# Initiate the Node class's constructor and give it a name
super().__init__('image_publisher')
# Create the publisher. This publisher will publish an Image
# to the video_frames topic. The queue size is 10 messages.
self.publisher_ = self.create_publisher(Image, 'video_frames', 10)
# We will publish a message every 0.1 seconds
timer_period = 0.1 # seconds
# Create the timer
self.timer = self.create_timer(timer_period, self.timer_callback)
# Create a VideoCapture object
# The argument '0' gets the default webcam.
self.cap = cv2.VideoCapture(0)
# Used to convert between ROS and OpenCV images
self.br = CvBridge()
def timer_callback(self):
"""
Callback function.
This function gets called every 0.1 seconds.
"""
# Capture frame-by-frame
# This method returns True/False as well
# as the video frame.
ret, frame = self.cap.read()
if ret:
# Publish the image.
# The 'cv2_to_imgmsg' method converts an OpenCV
# image to a ROS 2 image message
self.publisher_.publish(self.br.cv2_to_imgmsg(frame))
# Display the message on the console
self.get_logger().info('Publishing video frame')
def main(args=None):
# Initialize the rclpy library
rclpy.init(args=args)
# Create the node
image_publisher = ImagePublisher()
# Spin the node so the callback function is called.
rclpy.spin(image_publisher)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
image_publisher.destroy_node()
# Shutdown the ROS client library for Python
rclpy.shutdown()
if __name__ == '__main__':
main()
```
|
{
"source": "JeffreyUrban/count-sequences",
"score": 3
}
|
#### File: JeffreyUrban/count-sequences/tests.py
```python
import pytest
from random_list import random_list
from iterate_and_count import iterate_and_count
from iterate_and_count_using_sequence_trie import iterate_and_count_using_sequence_trie
def test_output_is_consistent():
unique_count = 10
list_length = 100
input_list = random_list(count=unique_count, length=list_length)
print("Input list: " + str(input_list))
sequences1 = iterate_and_count(input_list=input_list, min_sequence_length=2, max_sequence_length=list_length)
print(sequences1)
sequences2 = iterate_and_count_using_sequence_trie(input_list=input_list, min_sequence_length=2, max_sequence_length=list_length)
print(sequences2)
assert sequences1 == sequences2
```
|
{
"source": "JeffreyUrban/pi-relay-controller",
"score": 3
}
|
#### File: JeffreyUrban/pi-relay-controller/server.py
```python
from __future__ import print_function
import sys
import time
import json
from flask import Flask
from flask import make_response
from flask import render_template
from flask_bootstrap import Bootstrap
from relay_lib import *
from digital_in_lib import *
error_msg = '{msg:"error"}'
success_msg = '{msg:"success"}'
# Initialize these from channels.json
RELAY_PORTS = {}
root_dir = '/home/pi/pi-relay-controller'
with open('{}/channels.json'.format(root_dir)) as json_file:
channel_config = json.load(json_file)
RELAY_PORTS = {ch['channel']: ch['pin'] for ch in channel_config['channels'] if ch['type'] == "relay"}
DIGITAL_IN_PORTS = [ch['pin'] for ch in channel_config['channels'] if ch['type'] == "digital-in"]
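# Assumed shape of channels.json (illustrative only -- the real file may carry
# extra keys used by the template): the comprehensions above only rely on the
# 'channel', 'pin' and 'type' fields of each entry, e.g.
#   {"channels": [{"channel": 1, "pin": 17, "type": "relay"},
#                 {"channel": 2, "pin": 27, "type": "digital-in"}]}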
RELAY_NAME = 'Generic Relay Controller'
# initialize the relay library with the system's port configuration
relay_control = RelayControl(RELAY_PORTS)
digital_in_control = DigitalInControl(DIGITAL_IN_PORTS)
app = Flask(__name__)
bootstrap = Bootstrap(app)
@app.route('/')
def index():
print("Loading app Main page")
return render_template('index.html', relay_name=RELAY_NAME, channel_info=channel_config['channels'])
@app.route('/state/<int:digital_in>')
def api_get_state(digital_in):
res = digital_in_control.input_get_state(digital_in)
if res:
print("State is HIGH")
return make_response("1", 200)
else:
print("State is LOW")
return make_response("0", 200)
@app.route('/status/<int:relay>')
def api_get_status(relay):
res = relay_control.relay_get_port_status(relay)
if res:
print("Relay is ON")
return make_response("1", 200)
else:
print("Relay is OFF")
return make_response("0", 200)
@app.route('/toggle/<int:relay>')
def api_toggle_relay(relay):
print("Executing api_relay_toggle:", relay)
relay_control.relay_toggle_port(relay)
return make_response(success_msg, 200)
@app.route('/on/<int:relay>')
def api_relay_on(relay):
print("Executing api_relay_on:", relay)
relay_control.relay_on(relay)
return make_response(success_msg, 200)
@app.route('/off/<int:relay>')
def api_relay_off(relay):
print("Executing api_relay_off:", relay)
relay_control.relay_off(relay)
return make_response(success_msg, 200)
@app.route('/all_toggle/')
def api_relay_all_toggle():
print("Executing api_relay_all_toggle")
relay_control.relay_toggle_all_port()
return make_response(success_msg, 200)
@app.route('/all_on/')
def api_relay_all_on():
print("Executing api_relay_all_on")
relay_control.relay_all_on()
return make_response(success_msg, 200)
@app.route('/all_off/')
def api_all_relay_off():
print("Executing api_relay_all_off")
relay_control.relay_all_off()
return make_response(success_msg, 200)
@app.route('/reboot/<int:relay>')
def api_relay_reboot(relay, sleep_time=3):
print("Executing api_relay_reboot:", relay)
relay_control.relay_off(relay)
time.sleep(sleep_time)
relay_control.relay_on(relay)
return make_response(success_msg, 200)
@app.errorhandler(404)
def page_not_found(e):
print("ERROR: 404")
return render_template('404.html', the_error=e), 404
@app.errorhandler(500)
def internal_server_error(e):
print("ERROR: 500")
return render_template('500.html', the_error=e), 500
if __name__ == "__main__":
# On the Pi, you need to run the app using this command to make sure it
# listens for requests outside of the device.
app.run(host='0.0.0.0', port=8080)
```
|
{
"source": "JeffreyUrban/python-docs-samples",
"score": 2
}
|
#### File: v2/label_gce_instance/main.py
```python
import re
from google.api_core.exceptions import GoogleAPIError
from google.cloud import compute_v1
from google.cloud.compute_v1.types import compute
instances_client = compute_v1.InstancesClient()
# CloudEvent function that labels newly-created GCE instances
# with the entity (user or service account) that created them.
#
# @param {object} cloudevent A CloudEvent containing the Cloud Audit Log entry.
# @param {object} cloudevent.data.protoPayload The Cloud Audit Log entry.
def label_gce_instance(cloudevent):
# Extract parameters from the CloudEvent + Cloud Audit Log data
payload = cloudevent.data.get('protoPayload', dict())
auth_info = payload.get('authenticationInfo', dict())
creator = auth_info.get('principalEmail')
# Get relevant VM instance details from the cloudevent's `subject` property
# Example value:
# compute.googleapis.com/projects/<PROJECT_ID>/zones/<ZONE_ID>/instances/<INSTANCE_NAME>
instance_params = cloudevent['subject'].split('/')
# Validate data
if not creator or not instance_params or len(instance_params) != 7:
# This is not something retries will fix, so don't throw an Exception
# (Thrown exceptions trigger retries *if* you enable retries in GCF.)
print('ERROR: Invalid `principalEmail` and/or CloudEvent `subject`.')
return
instance_project = instance_params[2]
instance_zone = instance_params[4]
instance_name = instance_params[6]
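# For the example subject above, the split yields:
#   instance_params[2] == '<PROJECT_ID>', [4] == '<ZONE_ID>', [6] == '<INSTANCE_NAME>'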
# Format the 'creator' parameter to match GCE label validation requirements
creator = re.sub('\\W', '_', creator.lower())
# Get the newly-created VM instance's label fingerprint
# This is required by the Compute Engine API to prevent duplicate labels
instance = instances_client.get(
project=instance_project,
zone=instance_zone,
instance=instance_name
)
# Construct API call to label the VM instance with its creator
request_init = {
'project': instance_project,
'zone': instance_zone,
'instance': instance_name
}
request_init['instances_set_labels_request_resource'] = \
compute.InstancesSetLabelsRequest(
label_fingerprint=instance.label_fingerprint,
labels={'creator': creator}
)
request = compute.SetLabelsInstanceRequest(request_init)
# Perform instance-labeling API call
try:
instances_client.set_labels(request)
print(f'Labelled VM instance {instance_name} with creator: {creator}')
except GoogleAPIError as e:
# Swallowing the exception means failed invocations WON'T be retried
print('Label operation failed', e)
# Uncomment the line below to retry failed invocations.
# (You'll also have to enable retries in Cloud Functions itself.)
# raise e
return
# [END functions_label_gce_instance]
```
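For reference, a small sketch of how the CloudEvent `subject` string decomposes into the project, zone, and instance name the function reads; the values below are hypothetical.
```python
# Hypothetical subject value matching the format documented in label_gce_instance.
subject = 'compute.googleapis.com/projects/my-project/zones/us-central1-a/instances/my-vm'
parts = subject.split('/')
assert len(parts) == 7
project, zone, name = parts[2], parts[4], parts[6]
print(project, zone, name)  # my-project us-central1-a my-vm
```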
|
{
"source": "jeffreyvh/tacticalrmm",
"score": 2
}
|
#### File: tacticalrmm/agents/tasks.py
```python
import os
import subprocess
from loguru import logger
from time import sleep
import random
import requests
from packaging import version as pyver
from django.conf import settings
from tacticalrmm.celery import app
from agents.models import Agent, AgentOutage
logger.configure(**settings.LOG_CONFIG)
@app.task
def send_agent_update_task(pks, version):
assert isinstance(pks, list)
ver = version.split("winagent-v")[1]
q = Agent.objects.only("pk").filter(pk__in=pks)
agents = [
i
for i in q
if pyver.parse(i.version) < pyver.parse(ver) and i.status == "online"
]
if agents:
for agent in agents:
agent.update_pending = True
agent.save(update_fields=["update_pending"])
minions = [i.salt_id for i in agents]
r = Agent.get_github_versions()
git_versions = r["versions"]
data = r["data"] # full response from github
versions = {}
for i, release in enumerate(data):
versions[i] = release["name"]
key = [k for k, v in versions.items() if v == version][0]
download_url = data[key]["assets"][0]["browser_download_url"]
# split into chunks to not overload salt
chunks = (minions[i : i + 30] for i in range(0, len(minions), 30))
for chunk in chunks:
r = Agent.salt_batch_async(
minions=chunk,
func="win_agent.do_agent_update",
kwargs={"version": ver, "url": download_url},
)
sleep(5)
@app.task
def get_wmi_detail_task(pk):
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_cmd(timeout=30, func="win_agent.system_info")
if r == "timeout" or r == "error":
return "failed"
agent.wmi_detail = r
agent.save(update_fields=["wmi_detail"])
return "ok"
@app.task
def sync_salt_modules_task(pk):
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_cmd(timeout=35, func="saltutil.sync_modules")
    # successful sync if new/changed files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]}
# successful sync with no new/changed files: {'return': [{'MINION-15': []}]}
if r == "timeout" or r == "error":
logger.error(f"Unable to sync modules {agent.salt_id}")
return
logger.info(f"Successfully synced salt modules on {agent.hostname}")
return "ok"
@app.task
def uninstall_agent_task(salt_id):
attempts = 0
error = False
while 1:
try:
r = requests.post(
f"http://{settings.SALT_HOST}:8123/run",
json=[
{
"client": "local",
"tgt": salt_id,
"fun": "win_agent.uninstall_agent",
"timeout": 8,
"username": settings.SALT_USERNAME,
"password": <PASSWORD>,
"eauth": "pam",
}
],
timeout=10,
)
ret = r.json()["return"][0][salt_id]
except Exception:
attempts += 1
else:
if ret != "ok":
attempts += 1
else:
attempts = 0
if attempts >= 10:
error = True
break
elif attempts == 0:
break
if error:
logger.error(f"{salt_id} uninstall failed")
else:
logger.info(f"{salt_id} was successfully uninstalled")
try:
r = requests.post(
f"http://{settings.SALT_HOST}:8123/run",
json=[
{
"client": "wheel",
"fun": "key.delete",
"match": salt_id,
"username": settings.SALT_USERNAME,
"password": settings.SALT_PASSWORD,
"eauth": "<PASSWORD>",
}
],
timeout=30,
)
except Exception:
logger.error(f"{salt_id} unable to remove salt-key")
return "ok"
@app.task
def agent_outage_email_task(pk):
sleep(random.randint(1, 15))
outage = AgentOutage.objects.get(pk=pk)
outage.send_outage_email()
outage.outage_email_sent = True
outage.save(update_fields=["outage_email_sent"])
@app.task
def agent_recovery_email_task(pk):
sleep(random.randint(1, 15))
outage = AgentOutage.objects.get(pk=pk)
outage.send_recovery_email()
outage.recovery_email_sent = True
outage.save(update_fields=["recovery_email_sent"])
@app.task
def agent_outages_task():
agents = Agent.objects.only("pk")
for agent in agents:
if agent.status == "overdue":
outages = AgentOutage.objects.filter(agent=agent)
if outages and outages.last().is_active:
continue
outage = AgentOutage(agent=agent)
outage.save()
if agent.overdue_email_alert:
agent_outage_email_task.delay(pk=outage.pk)
if agent.overdue_text_alert:
# TODO
pass
```
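A quick sketch of the chunking pattern `send_agent_update_task` uses to avoid flooding salt; the minion IDs below are made up.
```python
# Hypothetical minion IDs, split into batches of 30 as in send_agent_update_task.
minions = ["minion-%d" % i for i in range(65)]
chunks = (minions[i: i + 30] for i in range(0, len(minions), 30))
print([len(chunk) for chunk in chunks])  # [30, 30, 5]
```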
#### File: tacticalrmm/checks/tasks.py
```python
import datetime as dt
import random
from time import sleep
from tacticalrmm.celery import app
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone as djangotime
from agents.models import Agent
from clients.models import Client, Site
@app.task
def handle_check_email_alert_task(pk):
from .models import Check
check = Check.objects.get(pk=pk)
# first time sending email
if not check.email_sent:
sleep(random.randint(1, 10))
check.send_email()
check.email_sent = djangotime.now()
check.save(update_fields=["email_sent"])
else:
# send an email only if the last email sent is older than 24 hours
delta = djangotime.now() - dt.timedelta(hours=24)
if check.email_sent < delta:
sleep(random.randint(1, 10))
check.send_email()
check.email_sent = djangotime.now()
check.save(update_fields=["email_sent"])
return "ok"
@app.task
def run_checks_task(pk):
agent = Agent.objects.get(pk=pk)
agent.salt_api_async(func="win_agent.run_manual_checks")
return "ok"
```
#### File: tacticalrmm/logs/tasks.py
```python
from loguru import logger
from tacticalrmm.celery import app
from django.conf import settings
from .models import PendingAction
logger.configure(**settings.LOG_CONFIG)
@app.task
def cancel_pending_action_task(data):
if data["action_type"] == "schedreboot" and data["status"] == "pending":
from agents.models import Agent
agent = Agent.objects.get(pk=data["agent"])
task_name = data["details"]["taskname"]
r = agent.salt_api_cmd(
timeout=30, func="task.delete_task", arg=[f"name={task_name}"]
)
if r == "timeout" or r == "error" or (isinstance(r, bool) and not r):
logger.error(
f"Unable to contact {agent.hostname}. Task {task_name} will need to cancelled manually."
)
return
else:
logger.info(f"Scheduled reboot cancelled on {agent.hostname}")
return "ok"
```
#### File: tacticalrmm/_modules/win_agent.py
```python
from __future__ import absolute_import
import psutil
import os
import datetime
import zlib
import json
import base64
import wmi
import win32evtlog
import win32con
import win32evtlogutil
import winerror
from time import sleep
import requests
import subprocess
import random
PROGRAM_DIR = "C:\\Program Files\\TacticalAgent"
TAC_RMM = os.path.join(PROGRAM_DIR, "tacticalrmm.exe")
NSSM = os.path.join(PROGRAM_DIR, "nssm.exe")
SALT_CALL = os.path.join("C:\\salt", "salt-call.bat")
TEMP_DIR = os.path.join("C:\\Windows", "Temp")
def get_services():
# see https://github.com/wh1te909/tacticalrmm/issues/38
# for why I am manually implementing the svc.as_dict() method of psutil
ret = []
for svc in psutil.win_service_iter():
i = {}
try:
i["display_name"] = svc.display_name()
i["binpath"] = svc.binpath()
i["username"] = svc.username()
i["start_type"] = svc.start_type()
i["status"] = svc.status()
i["pid"] = svc.pid()
i["name"] = svc.name()
i["description"] = svc.description()
except Exception:
continue
else:
ret.append(i)
return ret
def run_python_script(filename, timeout, script_type="userdefined"):
# no longer used in agent version 0.11.0
python_bin = os.path.join("c:\\salt\\bin", "python.exe")
file_path = os.path.join("c:\\windows\\temp", filename)
if os.path.exists(file_path):
try:
os.remove(file_path)
except:
pass
if script_type == "userdefined":
__salt__["cp.get_file"](f"salt://scripts/userdefined/{filename}", file_path)
else:
__salt__["cp.get_file"](f"salt://scripts/{filename}", file_path)
return __salt__["cmd.run_all"](f"{python_bin} {file_path}", timeout=timeout)
def run_script(filepath, filename, shell, timeout, args=[], bg=False):
if shell == "powershell" or shell == "cmd":
if args:
return __salt__["cmd.script"](
source=filepath,
args=" ".join(map(lambda x: f'"{x}"', args)),
shell=shell,
timeout=timeout,
bg=bg,
)
else:
return __salt__["cmd.script"](
source=filepath, shell=shell, timeout=timeout, bg=bg
)
elif shell == "python":
python_bin = os.path.join("c:\\salt\\bin", "python.exe")
file_path = os.path.join("c:\\windows\\temp", filename)
if os.path.exists(file_path):
try:
os.remove(file_path)
except:
pass
__salt__["cp.get_file"](filepath, file_path)
salt_cmd = "cmd.run_bg" if bg else "cmd.run_all"
if args:
a = " ".join(map(lambda x: f'"{x}"', args))
cmd = f"{python_bin} {file_path} {a}"
return __salt__[salt_cmd](cmd, timeout=timeout)
else:
return __salt__[salt_cmd](f"{python_bin} {file_path}", timeout=timeout)
def uninstall_agent():
remove_exe = os.path.join(PROGRAM_DIR, "unins000.exe")
__salt__["cmd.run_bg"]([remove_exe, "/VERYSILENT", "/SUPPRESSMSGBOXES"])
return "ok"
def update_salt():
from subprocess import Popen, PIPE
CREATE_NEW_PROCESS_GROUP = 0x00000200
DETACHED_PROCESS = 0x00000008
cmd = [TAC_RMM, "-m", "updatesalt"]
p = Popen(
cmd,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=True,
creationflags=DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP,
)
return p.pid
def run_manual_checks():
__salt__["cmd.run_bg"]([TAC_RMM, "-m", "runchecks"])
return "ok"
def install_updates():
for p in psutil.process_iter():
with p.oneshot():
if p.name() == "tacticalrmm.exe" and "winupdater" in p.cmdline():
return "running"
return __salt__["cmd.run_bg"]([TAC_RMM, "-m", "winupdater"])
def agent_update(version, url):
# make sure another instance of the update is not running
# this function spawns 2 instances of itself so if more than 2 running,
# don't continue as an update is already running
count = 0
for p in psutil.process_iter():
try:
with p.oneshot():
if "win_agent.agent_update" in p.cmdline():
count += 1
except Exception:
continue
if count > 2:
return "already running"
sleep(random.randint(1, 60)) # don't flood the rmm
try:
r = requests.get(url, stream=True, timeout=600)
except Exception:
return "failed"
if r.status_code != 200:
return "failed"
exe = os.path.join(TEMP_DIR, f"winagent-v{version}.exe")
with open(exe, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
del r
services = ("tacticalagent", "checkrunner")
for svc in services:
subprocess.run([NSSM, "stop", svc], timeout=120)
sleep(10)
r = subprocess.run([exe, "/VERYSILENT", "/SUPPRESSMSGBOXES"], timeout=300)
sleep(30)
for svc in services:
subprocess.run([NSSM, "start", svc], timeout=120)
return "ok"
def do_agent_update(version, url):
return __salt__["cmd.run_bg"](
[
"C:\\salt\\salt-call.bat",
"win_agent.agent_update",
f"version={version}",
f"url={url}",
"--local",
]
)
class SystemDetail:
def __init__(self):
self.c = wmi.WMI()
self.comp_sys_prod = self.c.Win32_ComputerSystemProduct()
self.comp_sys = self.c.Win32_ComputerSystem()
self.memory = self.c.Win32_PhysicalMemory()
self.os = self.c.Win32_OperatingSystem()
self.base_board = self.c.Win32_BaseBoard()
self.bios = self.c.Win32_BIOS()
self.disk = self.c.Win32_DiskDrive()
self.network_adapter = self.c.Win32_NetworkAdapter()
self.network_config = self.c.Win32_NetworkAdapterConfiguration()
self.desktop_monitor = self.c.Win32_DesktopMonitor()
self.cpu = self.c.Win32_Processor()
self.usb = self.c.Win32_USBController()
def get_all(self, obj):
ret = []
for i in obj:
tmp = [
{j: getattr(i, j)}
for j in list(i.properties)
if getattr(i, j) is not None
]
ret.append(tmp)
return ret
def system_info():
info = SystemDetail()
return {
"comp_sys_prod": info.get_all(info.comp_sys_prod),
"comp_sys": info.get_all(info.comp_sys),
"mem": info.get_all(info.memory),
"os": info.get_all(info.os),
"base_board": info.get_all(info.base_board),
"bios": info.get_all(info.bios),
"disk": info.get_all(info.disk),
"network_adapter": info.get_all(info.network_adapter),
"network_config": info.get_all(info.network_config),
"desktop_monitor": info.get_all(info.desktop_monitor),
"cpu": info.get_all(info.cpu),
"usb": info.get_all(info.usb),
}
def get_procs():
ret = []
# setup
for proc in psutil.process_iter():
with proc.oneshot():
proc.cpu_percent(interval=None)
# need time for psutil to record cpu percent
sleep(1)
for c, proc in enumerate(psutil.process_iter(), 1):
x = {}
with proc.oneshot():
if proc.pid == 0 or not proc.name():
continue
x["name"] = proc.name()
x["cpu_percent"] = proc.cpu_percent(interval=None) / psutil.cpu_count()
x["memory_percent"] = proc.memory_percent()
x["pid"] = proc.pid
x["ppid"] = proc.ppid()
x["status"] = proc.status()
x["username"] = proc.username()
x["id"] = c
ret.append(x)
return ret
def _compress_json(j):
return {
"wineventlog": base64.b64encode(
zlib.compress(json.dumps(j).encode("utf-8", errors="ignore"))
).decode("ascii", errors="ignore")
}
def get_eventlog(logtype, last_n_days):
start_time = datetime.datetime.now() - datetime.timedelta(days=last_n_days)
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ
status_dict = {
win32con.EVENTLOG_AUDIT_FAILURE: "AUDIT_FAILURE",
win32con.EVENTLOG_AUDIT_SUCCESS: "AUDIT_SUCCESS",
win32con.EVENTLOG_INFORMATION_TYPE: "INFO",
win32con.EVENTLOG_WARNING_TYPE: "WARNING",
win32con.EVENTLOG_ERROR_TYPE: "ERROR",
0: "INFO",
}
computer = "localhost"
hand = win32evtlog.OpenEventLog(computer, logtype)
total = win32evtlog.GetNumberOfEventLogRecords(hand)
log = []
uid = 0
done = False
try:
while 1:
events = win32evtlog.ReadEventLog(hand, flags, 0)
for ev_obj in events:
uid += 1
                # return once the total number of events is reached or we'll be stuck in an infinite loop
if uid >= total:
done = True
break
the_time = ev_obj.TimeGenerated.Format()
time_obj = datetime.datetime.strptime(the_time, "%c")
if time_obj < start_time:
done = True
break
computer = str(ev_obj.ComputerName)
src = str(ev_obj.SourceName)
evt_type = str(status_dict[ev_obj.EventType])
evt_id = str(winerror.HRESULT_CODE(ev_obj.EventID))
evt_category = str(ev_obj.EventCategory)
record = str(ev_obj.RecordNumber)
msg = (
str(win32evtlogutil.SafeFormatMessage(ev_obj, logtype))
.replace("<", "")
.replace(">", "")
)
event_dict = {
"computer": computer,
"source": src,
"eventType": evt_type,
"eventID": evt_id,
"eventCategory": evt_category,
"message": msg,
"time": the_time,
"record": record,
"uid": uid,
}
log.append(event_dict)
if done:
break
except Exception:
pass
win32evtlog.CloseEventLog(hand)
return _compress_json(log)
```
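A possible receiving-side counterpart to `_compress_json` above, reversing the zlib + base64 encoding; this helper is a sketch and is not part of the module.
```python
import base64
import json
import zlib
def _decompress_json(payload):
    # Reverse of _compress_json: base64-decode, zlib-inflate, then parse the JSON.
    raw = zlib.decompress(base64.b64decode(payload["wineventlog"]))
    return json.loads(raw.decode("utf-8", errors="ignore"))
```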
|
{
"source": "JeffreyW2468/CSP-stuffu",
"score": 3
}
|
#### File: CSP-stuffu/CSP AP Portfolio Project/Reaction_Game.py
```python
import turtle as trtl
import random as rand
import time
from time import sleep #utilizing time module for logging time to click and registering delays before object respawn
#initialization
trtl.Screen()
width = 950
height = 800
trtl.setup(width, height)
trtl.bgpic("sky.gif") #sets background image
trtl.title("Reaction Game")
#shape registrations (registers various images for moving object)
trtl.addshape("coin.gif")
trtl.addshape("parrot.gif")
trtl.addshape("dragon.gif")
trtl.addshape("explosion.gif")
flying_object = trtl.Turtle()
flying_object.penup()
#object list configuration data
object_list = ["coin.gif", "parrot.gif", "dragon.gif", "explosion.gif"] #holds the images for shape change references later
teleportation_delay = rand.randint(1, 3)
#game timer (initializes a timer display that increments downwards by 1000 milliseconds, or exactly 1 second)
timer = 20
time_up = False
count_interval = 1000
count_display = trtl.Turtle()
count_display.speed(2)
count_display.color("black")
count_display.hideturtle()
count_display.penup() #timer moves to the top of the screen and configures font display
count_display.goto(-320, 350)
font_config = ("Arial", 14, "normal")
#reaction timer
time_difference = [] #stores the actual timespan between each object spawn and user response
start_time = time.time() #approximate time of when the game first begins and before the first click
flying_object.shape(str(object_list[-1])) #sets the default object appearance to explosion.gif
#functions
def response_time_report(time_difference, preference):
average_time = round(sum(time_difference) / len(time_difference), 2) #takes the average of all response times to visual stimuli
if preference == "average":
count_display.write('Game over! Your average response time was: ' + str(average_time) + ' seconds!', font=font_config)
elif preference == "holistic":
count_display.write('Game over! Your individual response times were: ' + str([i for i in time_difference]) + ' seconds!', font=font_config) #displays each time split individually
def game_timer():
global timer, time_up
count_display.clear()
if timer <= 0:
flying_object.hideturtle()
preference = input("Would you like an average reaction time or holistic report? ")
response_time_report(time_difference, preference) #passes previous input entry as an argument
preference = input("You can select another report: average or holistic? ")
count_display.clear()
response_time_report(time_difference, preference) #calls function a second time for alternative display
time_up = True
else:
count_display.write("Time: " + str(timer), font = font_config)
timer -= 1
count_display.getscreen().ontimer(game_timer, count_interval) #refreshes the timer display each second and updates by counting an additional second downwards to zero
def location_reset():
x_pos = rand.randint(-475, 475)
y_pos = rand.randint(-400, 400) #pulls random coordinates for object to travel to
flying_object.hideturtle()
flying_object.goto(x_pos, y_pos) #object travels to the random coordinate
sleep(teleportation_delay) #suspends the program for a random amount of time between 1 and 3 seconds for the sake of unpredictability
flying_object.showturtle()
def has_clicked(xcor, ycor):
global time_up, start_time
if time_up != True:
time_entry = time.time() #records a timestamp of when the user clicks
difference = round(time_entry - start_time, 2) #calculates the difference between the timestamp of the user clicking and the last timestamp where the object changed location
time_difference.append(difference) #stores the user response time
flying_object.shape(str(object_list[-1]))
sleep(0.1)
object_iteration()
location_reset()
start_time = time.time() #timestamps the next time that the object appears
def object_iteration():
obj_index = rand.randint(0, 2) #random index of the object list; exclusive of the explosion animation
flying_object.shape(str(object_list[obj_index])) #changes the object to display the random item of the object list
def run_game(): #acts as abstraction, storing the all functions necessary for running the game into one function that can be called at the end
game_timer()
flying_object.onclick(has_clicked)
trtl.mainloop() #allows screen to persist after execution
#events
run_game() #calls all functions, initializing entire program
'''
Citations
coin.gif courtesy of emaze.com --> https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.emaze.com%2F%40AOTCFWIOF&psig=AOvVaw0NxqXmWRU3XUgc_4s4NOj9&ust=1621570617983000&source=images&cd=vfe&ved=0CAMQjB1qFwoTCLDKs-K01_ACFQAAAAAdAAAAABAb
dragon.gif courtesy of tenor.com --> https://tenor.com/view/drogon-gif-21425979
explosion.gif courtesy of pinterest.com --> https://www.pinterest.com/pin/167759154858790760/
parrot.gif courtesy of Clipart Library --> http://clipart-library.com/clipart/5cRrGBAzi.htm
sky.gif courtesy of cloudygif.com --> https://www.google.com/url?sa=i&url=https%3A%2F%2Fcloudygif.com%2F84fc7fedbe4f6e1d.aspx&psig=AOvVaw0c6c0sDtWHsP4skXq4mdHl&ust=1621573747605000&source=images&cd=vfe&ved=0CAMQjB1qFwoTCLDWzo2_1_ACFQAAAAAdAAAAABAJ
'''
```
|
{
"source": "jeffreywang1988/mosquitto-1.6.7-send_connect_status",
"score": 2
}
|
#### File: test/broker/03-publish-qos1-max-inflight.py
```python
from mosq_test_helper import *
def write_config(filename, port):
with open(filename, 'w') as f:
f.write("port %d\n" % (port))
f.write("max_inflight_messages 1\n")
port = mosq_test.get_port()
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port)
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("pub-qos1-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
mid = 311
publish_packet = mosq_test.gen_publish("pub/qos1/test", qos=1, mid=mid, payload="message")
puback_packet = mosq_test.gen_puback(mid)
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port, timeout=10)
mosq_test.do_send_receive(sock, publish_packet, puback_packet, "puback")
rc = 0
sock.close()
finally:
os.remove(conf_file)
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
```
|
{
"source": "JeffreyWang2864/bias-comments-mining",
"score": 2
}
|
#### File: bias-comments-mining/analyse/count.py
```python
import MySQLdb as mysql
import os
import jieba
from wordcloud import WordCloud, ImageColorGenerator
from concurrent.futures import ThreadPoolExecutor as tpe
from matplotlib import pyplot as plt
from util import PROJECT_ABS_PATH
from scipy.misc import imread
import time
custom_dictionary = ["韩国人", "中国人", "第三世界", "死宅", "生是中国人","厉害了我的哥", "死妈", "三哥", "开挂", "手抓饭", "阿三", "印度狗", "妈逼", "不干净","不卫生",
"啊三", "印度阿三", "恒河水", "好人一生平安", "印度人", "狗逼", "找骂", "死是中国魂", "韩国狗", "狗韩国",
"天团", "朝鲜狗", "韩国猪", "猪韩国", "吃狗", "南朝鲜", "大寒冥国", "棒粉" , "小日本", "日本狗", "日本鬼子", "本子", "鬼子", "黑鬼", "黑哥哥",
"种族天赋", "带感", "美黑", "白屁股", "黑屁股", "头脑简单", "四肢发达", "黑人天赋", "哈韩", "哈日", "广州黑人", "民族主义", "种族主义"]
filters = set([
"是不是", "表白", "我", "都", "这个", "这样", "那个", "这么", "还是", "还", "过", "跟", "谁", "说", "觉得", "要", "被", "自己",
"能", "给", "笑", "知道", "着", "真的", "看", "的", "现在", "问题", "为什么", "一个", "没", "比", "来", "有", "是", "把", "打",
"才", "很", "小", "对", "好", "喜欢", "她", "太", "大", "多", "在", "啊", "哈", "和", "呢", "听", "吧", "吗", "吃", "又", "去",
"到", "像", "做", "你", "会", "他", "人", "了", "也", "么", "个", "不", "上", "没有", "所以", "我们", "感觉", "感觉",
"怎么", "弹幕", "就是", "好看", "好吃", "回复", "你们", "但是", "他们", "什么", "不是", "一样", "可以", "时候" , "不要" , "因为" ,
"还有" , "前面" , "不会" , "那么" , "楼主" , "看到" , "这是" , "应该" , "好像" , "这种" , "视频" , "出来" , "一下" , "东西" ,
"不能" , "厉害" , "已经" , "其实" , "人家" , "很多" , "可能" , "一直" , "好听" , "有点" , "哈哈" , "声音" , "如果" , "这里" , "大家" ,
"只是" , "表示" , "只有" , "以为" , "不错" , "别人" , "承包" , "这些" , "开始" , "多少" , "两个" , "真是" , "看看" , "一点",
"就" ,"这" ,"想" ,"那" ,"最" ,"用" ,"为" ,"叫" ,"让" ,"呀" ,"真" ,"得" ,"里" ,"啦" ,"啥" ,"一" ,"哦" ,"但" ,"走" ,"更" ,"话" ,
"买" ,"别" ,"再" ,"挺" ,"年" ,"并" ,"完" ,"只" ,"嘛" ,"请" ,"下" ,"哇" ,"歌" ,"等" ,"拿" ,"超" ,"玩" ,"们" ,"点" ,"钱" ,"前" ,
"脸" ,"快" ,"懂" ,"高" ,"老" ,"当" ,"黑" ,"问", "超级" ,"比较" ,"看过" ,"不过" ,"地方" ,"第一" ,"的话" ,"看着" ,"辛苦" ,"特别" ,
"确实" ,"不行" ,"需要" ,"然后" ,"哪里" ,"老师" ,"一定" ,"最后" ,"以前" ,"这句" ,"突然" ,"而且" ,"直接" ,"首歌" ,"居然" ,"卧槽" ,
"东东" ,"虽然" ,"好多" ,"有人" ,"说话" ,"一次" ,"高能" ,"好好" ,"肯定" ,"为了" ,"衣服" ,"希望" ,"那些" ,"我家" ,"翻译" ,"发现" ,
"一口" ,"里面" ,"孩子" ,"几个" ,"本来" ,"字幕" , "国家", "喜欢","以后" ,"前方" ,"而已" ,"认识" ,"可是" ,"不了" ,"只能" ,"之前" ,"完全" ,"每次" ,
"意思" ,"名字" ,"有些" ,"一些" ,"后面" ,"其他" ,"今天" ,"终于" ,"不用" ,"回来" ,"疯狂", "嘴" ,"国" ,"日" ,"见" ,"连" ,"咋" ,"字" ,
"月" ,"靠" ,"美" ,"先" ,"开" ,"阿" ,"干" ,"手" ,"帮" ,"长" ,"号" ,"之" ,"学" ,"卖" ,"跑" ,"甜" ,"时" ,"泫" ,"饭" ,"它" ,"家" ,"写" ,
"讲" ,"主" ,"路" ,"发" ,"诶" ,"白" ,"行" ,"丶" ,"越" ,"少" ,"李" ,"嗯" ,"哎" ,"该" ,"抱" ,"算" ,"新" ,"地" ,"而" ,"搞" ,"后" ,"从" ,"与" ,
"事" ,"站" ,"带" ,"出" ,"找" ,"放", "至少" ,"哪个" ,"评论" ,"眼睛" ,"变成" ,"注意" ,"所有" ,"干嘛" ,"一天" ,"不同" ,"大爷" ,"呵呵" ,"情况" ,"小米" ,
"有没有" ,"不够" ,"操作" ,"到底" ,"原因" ,"标题" ,"真正" ,"全是" ,"重要" ,"还好", "差不多", "生日快乐", "谢谢", "一般", "起来", "不好",
"加油", "选择", "支持", "当然", "毕竟", "或者", "我要", "成功", "技术", "原来", "帖子", "最好", "过来", "只要", "记得", "电视", "不到",
"正常", "等等", "告诉", "非常", "之后", "准备", "基本", "封面", "上海", "不想", "要是", "小哥", "每天", "系列", "大概", "十五", "容易",
"唱", "由", "加", "已", "以", "无", "贴"
])
class CountWords:
def __init__(self, database, table, country):
self.frequency = dict()
self.file_names = list()
self.current_country = country
self.thread_pool_size = 8
self.is_frequency_sorted = False
self.var_names = ["word", "frequency"]
with open("/Users/Excited/localmysqlrootssh.txt", "r")as f:
local_info = f.readlines() #host, username, passwd, port
local_info = list(map(str.strip, local_info))
try:
self.connection = mysql.connect(
host=local_info[0],
user=local_info[1],
passwd=local_info[2],
db=database,
port=int(local_info[3]),
charset="utf8"
)
except mysql.Error as e:
print("Error: %s" % e)
self.cursor = self.connection.cursor()
self.table = table
def filter_frequency_with(self, target_filter):
for item in target_filter:
if self.frequency.get(item, -1) != -1:
self.frequency.pop(item)
def add_dictionary_from(self, target_dict):
for item in target_dict:
jieba.add_word(item, 3)
def get_all_data_file_name(self):
abs_path = "/Users/Excited/PycharmProjects/bias-comments-mining/data/%s/"%self.current_country
for parent_file_name in os.walk(abs_path):
for child_file_name in parent_file_name[-1]:
if child_file_name[-4:] == ".txt":
self.file_names.append(parent_file_name[0] + child_file_name)
print("found %d files in total"%len(self.file_names))
def read_from_file_and_count(self):
def _read_from_file_and_count(file_name):
with open(file_name, 'r') as f:
lines = f.readlines()
if len(lines) < 10:
return
for line in lines:
if not isinstance(line, str) or len(line) < 4 or len(line) > 500:
continue
vline = self.validate(line)
splited_words = [item for item in jieba.cut(vline)]
for splited_word in splited_words:
self.frequency[splited_word] = self.frequency.get(splited_word, 0) + 1
self.file_names.remove(file_name)
print("finish counting %s" % file_name)
executor = tpe(self.thread_pool_size)
executor.map(_read_from_file_and_count, self.file_names)
executor.shutdown(wait=True)
def validate(self, line):
length = len(line)
mark_list = list()
frontIndex = 0
endIndex = 1
while True:
if endIndex >= length and endIndex - frontIndex < 3:
break
if endIndex - frontIndex < 3:
endIndex += 1
continue
if line[frontIndex] == line[frontIndex + 1] == line[frontIndex + 2]:
currentCharacter = line[frontIndex]
frontIndex += 1
while frontIndex < length and line[frontIndex] == currentCharacter:
mark_list.append(frontIndex)
frontIndex += 1
endIndex = frontIndex + 1
else:
frontIndex += 1
if len(mark_list) == 0:
return line.strip()
unmarked = [i for i in range(length) if i not in mark_list]
return "".join([line[i] for i in unmarked]).strip()
def make_wordcloud(self, image_path):
back_coloring_path = PROJECT_ABS_PATH + image_path
font_path = PROJECT_ABS_PATH + "/bin/msyh.ttf"
saving_image_modify_by_shape = PROJECT_ABS_PATH + "/image/" + str(int(time.time())) + "_by_shape.png"
saving_image_modify_by_all = PROJECT_ABS_PATH + "/image/" + str(int(time.time())) + "_by_all.png"
back_coloring = imread(back_coloring_path)
wc = WordCloud(
font_path=font_path,
background_color="white",
max_words=300,
mask=back_coloring,
max_font_size=250,
random_state=42,
width=1080,
height=2048,
margin=2
)
wc.generate_from_frequencies(self.frequency)
image_colors = ImageColorGenerator(back_coloring)
plt.imshow(wc.recolor(color_func=image_colors))
plt.axis = "off"
plt.figure()
plt.imshow(back_coloring, cmap=plt.get_cmap('gray'))
plt.axis = "off"
plt.show()
#wc.to_file(saving_image_modify_by_all)
def _sort_frequency(self):
self.frequency = sorted(self.frequency.items(), key=lambda x: x[1], reverse=True)
self.is_frequency_sorted = True
def save_frequency_to_sql(self):
if not self.is_frequency_sorted:
self._sort_frequency()
for pair in self.frequency:
self.addRow(pair)
def closeConnection(self):
if self.connection:
self.connection.close()
def __del__(self):
self.closeConnection()
def getFormat(self):
self.cursor.execute("desc %s"%self.table)
return self.cursor.fetchall()
def execute(self, command):
assert isinstance(command, str)
self.cursor.execute(command)
def india_treatment(self):
modify_word = {"阿三": 10000, "种姓": 5000, "厕所":3000, "强奸": 4391, "素质": 3223, "印度":-10000, "中国":-10000}
for key, value in modify_word.items():
if self.frequency.get(key, -1) != -1:
self.frequency[key] += value
else:
self.frequency[key] = value
def korea_treatment(self):
modify_word = {"明星": 5000, "韩剧": 4000, "哥哥": 2000, "韩国": -40000, "中国": -20000}
for key, value in modify_word.items():
if self.frequency.get(key, -1) != -1:
self.frequency[key] += value
else:
self.frequency[key] = value
if self.frequency.get("黑人", -1) != -1:
self.frequency.pop("黑人")
def japan_treatment(self):
modify_word = {"日本": -20141, "日本人": 14982, "日语":5000, "鬼子": 5426, "本子": 3864, "动漫": 6000, "留学": 3000, "小姐姐": 3000, "中国":-10000, "宅": 3236}
for key, value in modify_word.items():
if self.frequency.get(key, -1) != -1:
self.frequency[key] += value
else:
self.frequency[key] = value
def black_treatment(self):
for key, value in self.frequency.items():
self.frequency[key] += value * 1.3
def getOne(self, with_label = False):
try:
res = self.cursor.fetchone()
if not with_label:
return res
res_dict = dict(zip([item[0] for item in self.cursor.description], res))
return res_dict
except mysql.Error as e:
print("error: %s"%e)
self.connection.rollback()
except:
print("error")
self.connection.rollback()
def getAll(self, with_label = False):
try:
res = self.cursor.fetchall()
if not with_label:
return res
res_list = list()
for row in res:
res_list.append(dict(zip([item[0] for item in self.cursor.description], row)))
return res_list
except mysql.Error as e:
print("error: %s"%e)
self.connection.rollback()
except:
print("error")
self.connection.rollback()
def addRow(self, data):
try:
command = "insert into " + self.table + "(" + ", ".join(["`" + str(item) + "`" for item in self.var_names]) + ")"
command += "VALUE(" + ", ".join(['"' + str(item) + '"' for item in data]) +");"
self.execute(command)
self.connection.commit()
except mysql.Error as e:
print("error: %s"%e)
self.connection.rollback()
except:
print("error")
self.connection.rollback()
```
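A hypothetical driver for the `CountWords` pipeline above; the database, table, country, and mask image path are placeholders, and the call order simply mirrors the methods defined in the class.
```python
# Hypothetical driver; database/table/country/mask path are placeholders.
# counter = CountWords("comments_db", "word_frequency", "japan")
# counter.add_dictionary_from(custom_dictionary)   # teach jieba the custom terms
# counter.get_all_data_file_name()
# counter.read_from_file_and_count()
# counter.filter_frequency_with(filters)           # drop common stop words
# counter.japan_treatment()                        # per-country frequency weighting
# counter.make_wordcloud("/image/japan_mask.png")
# counter.save_frequency_to_sql()
```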
#### File: JeffreyWang2864/bias-comments-mining/util.py
```python
import re
import time
import os
PROJECT_ABS_PATH = os.path.dirname(os.path.abspath(__file__))
def polishChineseSentences(targets):
polished = list()
for target in targets:
res = re.findall("[\u4E00-\u9FA5]+", str(target))
plain_chinese = "".join(res)
polished.append(plain_chinese)
polished = list(filter(lambda x: len(set(x)) > 1, polished))
return polished
def rest(second):
assert isinstance(second, int)
assert 0 < second < 100
time.sleep(second)
def print_progress(name, percentage):
percentage *= 100
num_of_sharp = int(percentage/2)
num_of_equal = 50-num_of_sharp
strap = "[" + "#" * num_of_sharp + "=" * num_of_equal + "]"
print("\n\nprogress of %s: %s %.2f%%\n\n"%(name, strap, percentage))
```
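A tiny example of the `print_progress` helper above, assuming the module is importable as `util`; the task name and fraction are arbitrary.
```python
from util import print_progress  # assumes the module above is on the path as `util`
print_progress("counting comments", 0.42)
# prints a 50-character bar (21 '#', 29 '=') followed by 42.00%
```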
|
{
"source": "JeffreyWardman/chesscom",
"score": 2
}
|
#### File: chesscom/tests/test_puzzles.py
```python
from chesscom.api.puzzles import Puzzles
class TestPuzzles:
@staticmethod
def test_daily():
Puzzles.daily()
@staticmethod
def test_random():
Puzzles.random()
```
#### File: chesscom/tests/test_tournaments.py
```python
from chesscom.api.tournaments import Tournament
class TestTournament:
@staticmethod
def test_get(tournament_id):
Tournament.get(tournament_id)
@staticmethod
def test_get_round(tournament_id, tournament_round):
Tournament.get_round(tournament_id, tournament_round)
@staticmethod
def test_get_round_group(tournament_id, tournament_round, tournament_group):
Tournament.get_round_group(tournament_id, tournament_round, tournament_group)
```
|
{
"source": "JeffreyWardman/FluentNet",
"score": 2
}
|
#### File: FluentNet/tests/conftest.py
```python
from pytest import fixture
import torch
@fixture
def input():
return torch.rand((1, 3, 256, 256))
```
|
{
"source": "JeffreyWardman/MultiPy",
"score": 4
}
|
#### File: JeffreyWardman/MultiPy/multipy.py
```python
import os
import pyautogui
def multipy(pyfile, input_file, virtual_env, PAUSE=0.15):
"""Runs a keygrabber script to set up the data collection for each input in a text file.
Inputs to arguments are written in a text file on a line-by-line basis and separated by
a space.
:param pyfile: string python file
:param virtual_env: string virtual environment
"""
    with open(input_file, 'r', newline='\n') as f:
        arguments = f.read().splitlines()
# open console
os.system('gnome-terminal')
pyautogui.PAUSE = PAUSE # pause for PAUSE seconds between inputs
for args in arguments:
pyautogui.press('enter')
# activate virtual environment
pyautogui.typewrite('source activate ' + virtual_env)
pyautogui.press('enter')
# run python file
pyautogui.typewrite('python ' + pyfile + ' ' + args)
pyautogui.press('enter')
# open new console tab
pyautogui.keyDown('ctrl')
pyautogui.keyDown('shift')
pyautogui.keyDown('T')
pyautogui.keyUp('ctrl')
pyautogui.keyUp('shift')
pyautogui.keyUp('T')
```
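A hedged usage sketch for `multipy` above: the input file holds one set of command-line arguments per line; the file names and environment are placeholders, and the actual call is left commented out because it drives the GUI.
```python
# Hypothetical input file; each line is the argument string for one run.
sample_args = "--subject 01 --trial 1\n--subject 02 --trial 1\n"
with open("args.txt", "w") as f:
    f.write(sample_args)
# multipy("keygrabber.py", "args.txt", virtual_env="myenv")  # would open terminals and type commands
```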
|
{
"source": "jeffreywolberg/udacity_behavioral_cloning",
"score": 2
}
|
#### File: jeffreywolberg/udacity_behavioral_cloning/model.py
```python
import os
import csv
import cv2
import numpy as np
import sklearn
from enum import Enum
from keras.optimizers import Adam
from keras.models import load_model, save_model
from keras.models import Sequential
from keras.layers import Lambda, Cropping2D, Conv2D, Dense, Flatten, MaxPool2D, Dropout
from sklearn.model_selection import train_test_split
shuffle = sklearn.utils.shuffle
ceil = np.ceil
join = os.path.join
class ImagePos(Enum):
center=0
left=1
right=2
class BehavioralCloning(object):
def __init__(self):
self.batch_size = 32
self.crop_up, self.crop_down = 50, 20
self.orig_dims = (160, 320, 3)
self.model_name = 'my_model_5.h5'
def get_train_val_data(self):
samples = []
file_names = [
# r'.\driving_logs\driving_log_train.csv',
r'.\driving_logs\driving_log_train2.csv']
#file_name = '/home/data/driving_log.csv'
# file_name = './driving_log_train.csv'
for i, file_name in enumerate(file_names):
with open(file_name) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append([line, i])
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
return train_samples, validation_samples
def generator(self, samples, batch_size=32):
def img_name(dir_name, img_details, imagePos: ImagePos):
if dir_name.startswith("/opt/carnd_p3/data/"):
return join(dir_name, img_details[imagePos.value].split("/")[-1])
else:
return join(dir_name, img_details[imagePos.value].split("\\")[-1])
num_samples = len(samples)
correction = .15
# dir_name = r'/home/data/IMG/'
dir_names = [
# r'.\IMG_folders\IMG_train',
r'.\IMG_folders\IMG_train2']
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
dir_index = batch_sample[1]
dir_name = dir_names[dir_index]
img_details = batch_sample[0]
center_name = img_name(dir_name, img_details, ImagePos.center)
center_image = cv2.imread(center_name)
if center_image is None:
print("Image doesn't exist")
continue
center_angle = float(img_details[3])
center_flipped_image = np.fliplr(center_image)
center_flipped_angle = -center_angle
left_name = img_name(dir_name, img_details, ImagePos.left)
left_image = cv2.imread(left_name)
left_angle = float(img_details[3]) + correction
right_name = img_name(dir_name, img_details, ImagePos.right)
right_image = cv2.imread(right_name)
right_angle = float(img_details[3]) - correction
images.extend([center_image, center_flipped_image, left_image, right_image])
angles.extend([center_angle, center_flipped_angle, left_angle, right_angle])
X_train = np.array(images)
y_train = np.array(angles)
# yield shuffle(X_train, y_train)
yield X_train, y_train
def create_model(self):
model = Sequential()
model.add(Cropping2D(cropping=((self.crop_up, self.crop_down), (0, 0)), input_shape=self.orig_dims))
        # 90, 320, 3
dims_1 = (self.orig_dims[0] - self.crop_up - self.crop_down, self.orig_dims[1], self.orig_dims[2])
print(dims_1)
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
filters, kernel_size, stride = 24, 5, (1, 1)
model.add(Conv2D(filters=filters, kernel_size=kernel_size, strides=stride,
padding='valid', activation='relu', input_shape=dims_1))
model.add(MaxPool2D((2,2)))
filters, kernel_size, stride = 36, 5, (1, 1)
model.add(Conv2D(filters=filters, kernel_size=kernel_size, strides=stride,
padding='valid', activation='relu'))
model.add(MaxPool2D((2,2)))
filters, kernel_size, stride = 48, 3, (1, 1)
model.add(Conv2D(filters=filters, kernel_size=kernel_size, strides=stride,
padding='valid', activation='relu'))
model.add(MaxPool2D((1,2)))
filters, kernel_size, stride = 64, 3, (1, 1)
model.add(Conv2D(filters=filters, kernel_size=kernel_size, strides=stride,
padding='valid', activation='relu'))
model.add(MaxPool2D((2,2)))
filters, kernel_size, stride = 64, 3, (1, 1)
model.add(Conv2D(filters=filters, kernel_size=kernel_size, strides=stride,
padding='valid', activation='relu'))
model.add(Dropout(.3))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
return model
def load_my_model(self):
# return load_model(f".{os.sep}{self.model_name}")
num = int(self.model_name.split('_')[-1].split('.h5')[0]) - 1
return load_model(join(os.getcwd(), "my_model_{}.h5".format(num)))
def train_model(self):
train_samples, validation_samples = self.get_train_val_data()
# compile and train the model using the generator function
train_generator = self.generator(train_samples, batch_size=self.batch_size)
validation_generator = self.generator(validation_samples, batch_size=self.batch_size)
# model = self.load_my_model()
model = self.create_model()
optimizer = Adam(lr=.0005)
# print(model.summary())
model.compile(loss='mse', optimizer=optimizer)
model.fit_generator(train_generator, steps_per_epoch=ceil(len(train_samples)/self.batch_size),
validation_data=validation_generator,
validation_steps=ceil(len(validation_samples)/self.batch_size),
epochs=2, verbose=1)
# model.save(fr'.{os.sep}{self.model_name}')
save_model(model, join(os.getcwd(), self.model_name))
if __name__ == '__main__':
inst = BehavioralCloning()
inst.train_model()
```
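A small illustration of the flip augmentation used in `generator()` above: mirroring a frame left-to-right negates its steering angle. The array shape and angle are illustrative placeholders.
```python
import numpy as np
# Illustrative frame and steering angle (placeholders, not real training data).
frame = np.zeros((160, 320, 3), dtype=np.uint8)
angle = 0.2
flipped_frame = np.fliplr(frame)  # mirror left/right
flipped_angle = -angle            # steering direction flips with the image
```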
|
{
"source": "jeffreywolf/landscape",
"score": 3
}
|
#### File: landscape/patch/patches.py
```python
import numpy as np
import os
from landscape.raster import raster
class Patches(object):
"""Patches class is a sequence of Patch objects
"""
def __init__(self, image_filename, labels_filename, size):
"""Initialize a Patches object
The image at image_filename and the labels at labels_filename
must have the same projection, geotransform, and extent.
Args:
image_filename: filename representing path to image
labels_filename: filename representing path to labels
size: `int` size of window side
Returns:
None
Raises:
AssertionError
"""
self._image = None
self._labels = None
self._image_metadata = None
self._labels_metadata = None
self._size = None
self._offset = None
self._labeled_indices = None # initialized in __iter__()
self._count = None # initialized in __iter__()
self._max_iter = None # initialized in __iter__()
# assert valid files
assert os.path.exists(image_filename), ("image file not found")
assert os.path.exists(labels_filename), ("labels file not found")
# assert equal metadata
image_metadata = raster.Metadata(image_filename)
labels_metadata = raster.Metadata(labels_filename)
assert image_metadata == labels_metadata, (
"Metadata are not equivalent. " +
"Try `gdalinfo` on the files. " +
"Look at the docstring for `raster.Metadata.__eq__()`.")
assert labels_metadata.ndv is not None, (
"labels metadata ndv is None")
self._image_metadata = image_metadata
self._labels_metadata = labels_metadata
# asserts on image and labels np.ndarrays
image = raster.load_image(image_filename)
labels = raster.load_image(labels_filename)
assert isinstance(image, np.ndarray), (
"image must be a numpy.ndarray")
assert len(image.shape) == 3, (
"image must be an numpy.ndarray with shape (H,W,D)")
assert isinstance(labels, np.ndarray), (
"labels must be a numpy.ndarray")
assert len(labels.shape) == 3, (
"lables must be an numpy.ndarray with shape (H,W,D)")
# test if shape of both is equal on H,W axes
assert image.shape[0] == labels.shape[0], (
"Image and label height is different")
assert image.shape[1] == labels.shape[1], (
"Image and label height is different")
self._image = image
self._labels = labels
# assert on size
assert isinstance(size, int), ("size must be an integer")
assert size % 2 == 1, ("size must be an odd integer")
assert size > 1, ("size must be an integer >1")
self._size = size
self._offset = self.size // 2
@property
def image(self):
"""The image `np.ndarray` with shape (H,W,D)
"""
return self._image
@property
def labels(self):
"""The labels `np.ndarray` with shape (H,W,D)
"""
return self._labels
@property
def image_metadata(self):
"""The image `Metadata` object
"""
return self._image_metadata
@property
def labels_metadata(self):
"""The labels `Metadata` object
"""
return self._labels_metadata
@property
def size(self):
"""The `int` size of the side length.
Must be an odd `int`
"""
return self._size
@property
def offset(self):
"""The `int` offset derived from self._size//2
An even integer
"""
return self._offset
@property
def labeled_indices(self):
"""An indices iterator to access labeled pixels
"""
return self._labeled_indices
def _calculate_origin(self, origin, resolution, offset, index):
"""Calculate new origin
Args:
origin: `float`
resolution: `float` that can be positive or negative
offset: `int` pixel offset
index: `int` index
Returns:
new origin `float`
Raises:
AssertionError
"""
assert isinstance(index, int)
assert isinstance(offset, int)
resolution_string = str(resolution)
parts = resolution_string.split(".")
if len(parts) == 2:
precision = len(parts[1])
else:
precision = 0
# calculate difference
difference = (index - offset) * resolution
origin += difference
return round(origin, precision)
def _build_geotransform(self, i, j):
"""Build geotransform for an image patch
Args:
i: `int` row index
j: `int` column index
Returns:
GDAL geotransform for `Metadata` object
Raises:
AssertionError
"""
assert isinstance(i, int), ("i is not an integer")
assert isinstance(j, int), ("j is not an integer")
x_origin, x_res, x_ignore, y_origin, y_ignore, y_res = (
self.image_metadata.geotransform)
# integer conversion to reduce floating point error
new_x_origin = self._calculate_origin(x_origin, x_res, self.offset, j)
new_y_origin = self._calculate_origin(y_origin, y_res, self.offset, i)
geotransform = (new_x_origin, x_res, x_ignore, new_y_origin,
y_ignore, y_res)
return geotransform
def _patch_metadata(self, i, j):
"""Build metadata for an image patch
Uses self.image_metadata as the metadata source. Modifies
the geotransform, x, and y size. Keeps the same projection,
datatype, and ndv.
Args:
i: `int` row index into image and labels `np.ndarray`
j: `int` col index into image and labels `np.ndarray`
Returns:
`raster.Metadata` object
Raises:
"""
assert isinstance(i, int), ("i is not an integer")
assert i >= 0, ("i must be >= 0")
assert isinstance(j, int), ("j is not an integer")
assert j >= 0, ("j must be >= 0")
# modify the geotransform
geotransform = self._build_geotransform(i, j)
# modify the x and y size
x, y = self.size, self.size
# projection
projection = self.image_metadata.projection
# datatype
datatype = self.image_metadata.datatype
# ndv
ndv = self.image_metadata.ndv
metadata = raster.Metadata()
metadata.set(x, y, projection, geotransform, datatype, ndv)
return metadata
def _patch_image(self, i, j):
"""Build an image patch
Args:
i: `int` row index into image and labels `nd.ndarray`
j: `int` row index into image and labels `nd.ndarray`
size:
Returns:
`np.ndarray`
Raises:
AssertionError
"""
assert isinstance(i, int), ("i is not an integer")
assert i >= 0, ("i must be >= 0")
assert isinstance(j, int), ("j is not an integer")
assert j >= 0, ("j must be >= 0")
imin, imax = i - self.offset, i + self.offset + 1
jmin, jmax = j - self.offset, j + self.offset + 1
image = self.image[imin:imax, jmin:jmax, :]
return image
def _patch_label(self, i, j):
"""Get patch label
Args:
i: index i
j: index j
Returns:
label
"""
assert isinstance(i, int), ("i is not an integer")
assert i >= 0, ("i must be >= 0")
assert isinstance(j, int), ("j is not an integer")
assert j >= 0, ("j must be >= 0")
band = 0 # currently supports 1 band labels
label = self.labels[i, j, band]
return label
def __iter__(self):
"""Initialize an iterator
"""
# height and width
shape = self.labels.shape[:2] # equivalently use self.image.shape[:2]
# rows (H,W) `np.ndarray` and columns (H,W) `np.ndarray`
rows, columns = np.indices(shape)
ndv = self.labels_metadata.ndv
# an (H,W,D) `np.ndarray`, labels must be 1 band
band = 0
valid = self.labels[:,:,band] != ndv
# valid rows
valid_rows = rows[valid]
valid_columns = columns[valid]
# randomize - should use seed
# equivalently could use valid_columns.shape
n_valid_rows = valid_rows.shape[0]
indices = np.arange(n_valid_rows)
np.random.shuffle(indices)
self._labeled_indices = np.vstack(
(valid_rows[indices], valid_columns[indices])).T
self._labeled_indices.astype(int)
self._count = 0
self._max_iter = n_valid_rows
return self
def __next__(self):
"""Next patch from the iterator
Args:
None
Returns:
`Patch` object
Raises:
StopIteration
"""
if self._count == self._max_iter:
raise StopIteration
i_npint64, j_npint64 = self._labeled_indices[self._count,:]
# alternative to explicit casting is to
# broaden the integer types accepted by the assert clauses
i, j = int(i_npint64), int(j_npint64)
image = self._patch_image(i, j)
label = self._patch_label(i, j)
metadata = self._patch_metadata(i, j)
patch = Patch(image, label, metadata, self.size)
self._count += 1
return patch
def __len__(self):
"""The number of `Patch` objects
Args:
None
Returns:
`int` number of `Patch` objects in `Patches` object
"""
#initialize self._max_iter
if self._max_iter is None:
# self._max_iter is initialized in __iter__()
iter(self)
return self._max_iter
class Patch(object):
"""Patch
"""
def __init__(self, image, label, metadata, size):
"""Initialize a `Patch` object
Args:
image: a `np.ndarray` of shape (H,W,D)
label: an `int` or `float` type
metadata: a `raster.Metadata` object
size: `int` number of pixels along one axis
Returns:
None
Raises:
AssertionError
"""
self._image = None
self._label = None
self._metadata = None
self._size = None
assert isinstance(image, np.ndarray), ("image must be a numpy.ndarray")
assert len(image.shape) == 3, (
"image must be an numpy.ndarray with shape (H,W,D)")
self._image = image
# label assertion
# need to figure out how to better support np dtypes
#assert isinstance(label, float) or isinstance(label, int), (
# "Patch class currently supports only float or int labels")
self._label = label
# metadata assertion
assert isinstance(metadata, raster.Metadata)
self._metadata = metadata
# size
height, width = self._image.shape[:2]
assert size == width, ("Size and width of image are not equal")
assert size == height, ("Size and height of image are not equal")
self._size = size
@property
def image(self):
"""The image
"""
return self._image
@property
def label(self):
"""The label
"""
return self._label
@property
def metadata(self):
"""The metadata
"""
return self._metadata
@property
def size(self):
"""The size
"""
return self._size
def save_image(self, filename):
"""Save a patch as a raster file
Args:
filename: a valid path for a new file
Returns:
None
"""
raster.save_image(filename, self.image, self.metadata)
def __str__(self):
"""String containing image and label
"""
return str(self.image) + "\n" + str(self.label)
```
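A hypothetical way to drive the `Patches` iterator above; the GeoTIFF paths are placeholders and, as the class asserts, must share projection, geotransform, and extent.
```python
# Hypothetical usage; the file paths below are placeholders.
# patches = Patches("ortho_image.tif", "training_labels.tif", size=9)
# for patch in patches:                       # labeled pixels, visited in random order
#     print(patch.label, patch.image.shape)   # (9, 9, D) window centred on the pixel
#     patch.save_image("patch_example.tif")   # window written with its own geotransform
```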
#### File: landscape/raster/raster.py
```python
import os
import numpy as np
import gdal
import osr
class Metadata(object):
"""Geographical metadata for `load_image()` and `save_image()`
"""
# Band types supported for reading and writing of GeoTiff images
# www.gdal.org/frmt_gtiff.html
GDAL_DATATYPES = [gdal.GDT_Byte,
gdal.GDT_UInt16,
gdal.GDT_Int16,
gdal.GDT_UInt32,
gdal.GDT_Int32,
gdal.GDT_Float32,
gdal.GDT_Float64,
gdal.GDT_CInt16,
gdal.GDT_CInt32,
gdal.GDT_CFloat32,
gdal.GDT_CFloat64]
def __init__(self, filename=None):
"""Construct geographical metadata for an image
Args:
filename: None (default) or a file name path
Returns:
A `Metadata` object
Raises:
AssertionError
"""
self._x = None
self._y = None
self._projection = None
self._geotransform = None
self._datatype = None
        self._ndv = None
        # initialize with metadata from a raster file
if filename is not None:
# assertions
assert os.path.exists(filename), (
"Invalid path to # file.")
dataset = gdal.Open(filename, gdal.GA_ReadOnly)
assert dataset # assert that dataset is not None
self._projection = dataset.GetProjectionRef() # wkt
# use osr.SpatialReference class to validate projection
# http://www.gdal.org/osr_tutorial.html
spatial_reference = osr.SpatialReference()
spatial_reference.ImportFromWkt(self._projection)
isprojected = spatial_reference.IsProjected()
assert isprojected, ("WKT projection not parsed by OGR")
self._geotransform = dataset.GetGeoTransform()
assert len(self._geotransform) == 6, (
"geotransform must have 6 elements")
assert False not in [
isinstance(x, float) for x in self._geotransform], (
"geotransform elements must be float type")
band = dataset.GetRasterBand(1)
assert band # assert that band is not None
# what is this if there is no ndv?
self._ndv = band.GetNoDataValue()
# assert ndv
self._x = band.XSize
assert isinstance(self._x, int)
self._y = band.YSize
assert isinstance(self._y, int)
self._datatype = band.DataType
assert self._datatype in Metadata.GDAL_DATATYPES
@property
def x(self):
"""The size of the image along the x-axis (width).
"""
return self._x
@property
def y(self):
"""The size of the image along the y-axis (height)
"""
return self._y
@property
def projection(self):
"""The well-known text projection
"""
return self._projection
@property
def geotransform(self):
"""The geotransform list
"""
return self._geotransform
@property
def datatype(self):
"""The GDAL DataType
"""
return self._datatype
@property
def ndv(self):
"""The no data value
"""
return self._ndv
def set(self, x, y, projection, geotransform,
datatype=gdal.GDT_Float32, ndv=None):
"""Set the metadata for new file
Args:
x: `int` x size
y: `int` y size
projection: a valid WKT projection
geotransform: a list of six floating point numbers representing an
affine GeoTransform as described in the GDAL data model
(http://www.gdal.org/gdal_datamodel.html)
datatype: gdal.GDT_Float32 (default) or any GeoTiff supported
data type (www.gdal.org/frmt_gtiff.html).
ndv: None (default) or any number representable in datatype
Returns:
None
Raises:
            AssertionError if invalid types are used to initialize the Metadata
"""
# assert's about x
assert isinstance(x, int)
self._x = x
# assert's about y
assert isinstance(y, int)
self._y = y
# use osr.SpatialReference class to validate projection
# http://www.gdal.org/osr_tutorial.html
spatial_reference = osr.SpatialReference()
spatial_reference.ImportFromWkt(projection)
isprojected = spatial_reference.IsProjected()
assert isprojected, ("WKT projection not parsed by OGR")
self._projection = projection
assert len(geotransform) == 6, ("geotransform must have 6 elements")
assert False not in [isinstance(x, float) for x in geotransform],(
"geotransform elements must be float type")
self._geotransform = geotransform
assert datatype in Metadata.GDAL_DATATYPES, (
"datatype is not recognized as a valid GDAL datatype for GeoTiff.")
self._datatype = datatype
# assert's about ndv
self._ndv = ndv
def create(self):
"""
Args:
None
Returns:
x: x-dimension size
y: y-dimension size
datatype: GDAL DataType
Raises:
AssertionError
"""
assert isinstance(self.x, int), ("Metadata is uninitialized")
assert isinstance(self.y, int), ("Metadata is uninitialized")
assert self.datatype in Metadata.GDAL_DATATYPES, (
"Invalid GDAL DataType")
return self.x, self.y, self.datatype
def __eq__(self, other):
"""Test two Metadata objects for spatial equality.
        Test if two `Metadata` objects have the same geotransform,
        projection, x, and y properties. This test is used to
evaluate whether two images can use the same horizontal
indexing operations and maintain spatial consistency.
Args:
other: another `Metadata` object
Returns:
`bool` object
Raises:
AssertionError
"""
assert isinstance(other, Metadata), (
"other is not a valid Metadata object")
return (repr(self.geotransform) == repr(other.geotransform) and
self.projection == other.projection and self.x == other.x
and self.y == other.y)
def load_image(filename=None):
"""Load an image
Loads an image as an `numpy.ndarray` with dim's (H,W,D). H is the
height (y size), W is the width (x size), and D is the depth (the
number of channels in the image).
Args:
filename: `string` path to a gdal compatable image file
Returns:
numpy representation of an image with array shape (H,W,D)
Raises:
AssertionError
"""
# assertions
assert filename is not None, ("Filename cannot be None")
assert os.path.exists(filename), (
"Invalid path to # file.")
dataset = gdal.Open(filename, gdal.GA_ReadOnly)
assert dataset, ("GDAL could not open {}".format(filename))
# read image
bands = [dataset.GetRasterBand(i+1).ReadAsArray() for i in range(
dataset.RasterCount)]
dataset = None
return np.stack(bands, 2)
def load_mask(filename):
"""Load a mask
Loads a mask as a `np.ndarray` with dtype `np.float32`. Invalid
values are stored as `np.nan`
Args:
filename: `string` path to a gdal compatable image file
Returns:
numpy representation of an image mask with array shape (H,W,D)
Raises:
AssertionError from `load_image` or `Metadata` initialization
"""
mask_in = load_image(filename)
mask = mask_in.astype(np.float32)
metadata = Metadata(filename)
mask[mask == metadata.ndv] = np.NAN
return mask
def save_image(filename, image, metadata):
"""Save an image
Saves an image as a GeoTiff.
Args:
image: a numpy `ndarray` with array shape (H,W,D)
metadata: object of class `Metadata`
filename: `string` a valid system path
Returns:
None
Raises:
AssertionError
"""
# assertions
path = os.path.dirname(filename)
assert path == "" or os.path.exists(path), ("Invalid directory name")
assert isinstance(image, np.ndarray), ("image must be a numpy.ndarray")
assert len(image.shape) == 3, ("image must be an numpy.ndarray with shape (H,W,D)")
rows = image.shape[0]
cols = image.shape[1]
n_bands = image.shape[2]
assert isinstance(metadata, Metadata)
geotransform = metadata.geotransform
assert len(geotransform) == 6, ("Geotransform must be 6 elements")
# use osr.SpatialReference class to validate projection
# http://www.gdal.org/osr_tutorial.html
projection = metadata.projection
spatial_reference = osr.SpatialReference()
spatial_reference.ImportFromWkt(projection)
isprojected = spatial_reference.IsProjected()
assert isprojected, ("WKT projection not parsed by OGR")
x, y, datatype = metadata.create()
assert y == rows
assert x == cols
# could check that datatype is appropriate for dtype of image
assert datatype in Metadata.GDAL_DATATYPES, (
"datatype is not recognized as a valid GDAL datatype for GeoTiff.")
ndv = metadata.ndv
# save image
format = "GTiff"
driver = gdal.GetDriverByName(format)
dataset = driver.Create(filename, x, y, n_bands, datatype)
dataset.SetGeoTransform(geotransform)
dataset.SetProjection(projection)
depth_axis_len = image.shape[2]
for depth in range(depth_axis_len):
band = depth + 1
dataset.GetRasterBand(band).WriteArray(image[:,:, depth])
if band == 1 and ndv is not None:
dataset.GetRasterBand(1).SetNoDataValue(ndv)
dataset = None
```
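A short sketch of a round trip with the raster helpers above; the input path is a placeholder.
```python
# Hypothetical round trip; "input.tif" is a placeholder path.
# meta = Metadata("input.tif")          # projection, geotransform, size, ndv
# img = load_image("input.tif")         # (H, W, D) numpy array
# save_image("copy.tif", img, meta)     # GeoTiff with identical georeferencing
```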
#### File: landscape/test/test_landscape.py
```python
from landscape.topography import topography
import unittest
import os
import shutil
import numpy as np
class TestGaussianTopographyInitialize(unittest.TestCase):
"""
"""
def setUp(self):
"""Setup method
"""
self.valid_input_filename = "test/data/test_dem.tif"
self.invalid_input_filename = "test/data/no_such_file.tif"
def tearDown(self):
"""Tear down method
"""
del self.valid_input_filename
del self.invalid_input_filename
def test_invalid_dem_filename(self):
"""Test instantiation failure with invalid filename
"""
with self.assertRaises(AssertionError):
gt = topography.GaussianTopography(self.invalid_input_filename)
def test_valid_dem_filename(self):
"""Test instantiation success with valid filename
"""
gt = topography.GaussianTopography(self.valid_input_filename)
self.assertTrue(isinstance(gt, topography.GaussianTopography))
# Remaining test cases subclass TestGaussianTopographyMethod
class TestGaussianTopographyMethod(unittest.TestCase):
"""Superclass
"""
def setUp(self):
"""Setup method
"""
self.valid_input_filename = "test/data/test_dem.tif"
self.valid_output_filename = "test/data/temp/test.tif"
self.invalid_output_filename = (
"test/data/no_such_directory/test.tif")
self.gt = topography.GaussianTopography(self.valid_input_filename)
self.bw = 5
try:
if os.path.isfile(self.valid_output_filename):
os.remove(self.valid_output_filename)
except Exception as e:
print("Unable to remove the temporary file used",
" to test raster.save_image().")
def tearDown(self):
"""Tear down method
"""
try:
if os.path.isfile(self.valid_output_filename):
os.remove(self.valid_output_filename)
except Exception as e:
print("Unable to remove the temporary file used",
" to test raster.save_image().")
del self.valid_input_filename
del self.valid_output_filename
del self.invalid_output_filename
del self.gt
del self.bw
class TestGaussianTopographyFilter(TestGaussianTopographyMethod):
"""Subclass of `TestGaussianTopographyMethod`
"""
def test_valid_filename(self):
"""Test write success with valid output filename
"""
self.gt.filter(self.bw, self.valid_output_filename)
self.assertTrue(os.path.exists(self.valid_output_filename))
def test_no_filename(self):
"""Test valid return of array without an output filename
"""
array = self.gt.filter(self.bw)
self.assertTrue(isinstance(array, np.ndarray))
def test_invalid_filename(self):
"""Test failure of output with invalid filename
"""
with self.assertRaises(AssertionError):
self.gt.filter(self.bw, self.invalid_output_filename)
def test_image_returned(self):
"""Test an ndarray returned
"""
array_no_filename = self.gt.filter(self.bw)
self.assertTrue(isinstance(array_no_filename, np.ndarray))
array_filename = self.gt.filter(self.bw, self.valid_output_filename)
self.assertTrue(isinstance(array_filename, np.ndarray))
class TestGaussianTopographyGradientMagnitude(TestGaussianTopographyMethod):
"""Subclass of `TestGaussianTopographyMethod`
"""
def test_valid_filename(self):
"""Test write success with valid output filename
"""
self.gt.gradient_magnitude(self.bw, self.valid_output_filename)
self.assertTrue(os.path.exists(self.valid_output_filename))
def test_no_filename(self):
"""Test valid return of array without an output filename
"""
array = self.gt.gradient_magnitude(self.bw)
self.assertTrue(isinstance(array, np.ndarray))
def test_invalid_filename(self):
"""Test failure of output with invalid filename
"""
with self.assertRaises(AssertionError):
self.gt.gradient_magnitude(self.bw, self.invalid_output_filename)
def test_image_returned(self):
"""Test an ndarray returned
"""
array_no_filename = self.gt.gradient_magnitude(self.bw)
self.assertTrue(isinstance(array_no_filename, np.ndarray))
array_filename = self.gt.gradient_magnitude(
self.bw, self.valid_output_filename)
self.assertTrue(isinstance(array_filename, np.ndarray))
class TestGaussianTopographyGradientDx(TestGaussianTopographyMethod):
"""Subclass of `TestGaussianTopographyMethod`
"""
def test_valid_filename(self):
"""Test write success with valid output filename
"""
self.gt.gradient_dx(self.bw, self.valid_output_filename)
self.assertTrue(os.path.exists(self.valid_output_filename))
def test_no_filename(self):
"""Test valid return of array without an output filename
"""
array = self.gt.gradient_dx(self.bw)
self.assertTrue(isinstance(array, np.ndarray))
def test_invalid_filename(self):
"""Test failure of output with invalid filename
"""
with self.assertRaises(AssertionError):
self.gt.gradient_dx(self.bw, self.invalid_output_filename)
def test_image_returned(self):
"""Test an ndarray returned
"""
array_no_filename = self.gt.gradient_dx(self.bw)
self.assertTrue(isinstance(array_no_filename, np.ndarray))
array_filename = self.gt.gradient_dx(
self.bw, self.valid_output_filename)
self.assertTrue(isinstance(array_filename, np.ndarray))
class TestGaussianTopographyGradientDy(TestGaussianTopographyMethod):
"""Subclass of `TestGaussianTopographyMethod`
"""
def test_valid_filename(self):
"""Test write success with valid output filename
"""
self.gt.gradient_dy(self.bw, self.valid_output_filename)
self.assertTrue(os.path.exists(self.valid_output_filename))
def test_no_filename(self):
"""Test valid return of array without an output filename
"""
array = self.gt.gradient_dy(self.bw)
self.assertTrue(isinstance(array, np.ndarray))
def test_invalid_filename(self):
"""Test failure of output with invalid filename
"""
with self.assertRaises(AssertionError):
self.gt.gradient_dy(self.bw, self.invalid_output_filename)
def test_image_returned(self):
"""Test an ndarray returned
"""
array_no_filename = self.gt.gradient_dy(self.bw)
self.assertTrue(isinstance(array_no_filename, np.ndarray))
array_filename = self.gt.gradient_dy(
self.bw, self.valid_output_filename)
self.assertTrue(isinstance(array_filename, np.ndarray))
class TestGaussianTopographyLaplacian(TestGaussianTopographyMethod):
"""Subclass of `TestGaussianTopographyMethod`
"""
def test_valid_filename(self):
"""Test write success with valid output filename
"""
self.gt.laplacian(self.bw, self.valid_output_filename)
self.assertTrue(os.path.exists(self.valid_output_filename))
def test_no_filename(self):
"""Test valid return of array without an output filename
"""
array = self.gt.laplacian(self.bw)
self.assertTrue(isinstance(array, np.ndarray))
def test_invalid_filename(self):
"""Test failure of output with invalid filename
"""
with self.assertRaises(AssertionError):
self.gt.laplacian(self.bw, self.invalid_output_filename)
def test_image_returned(self):
"""Test an ndarray returned
"""
array_no_filename = self.gt.laplacian(self.bw)
self.assertTrue(isinstance(array_no_filename, np.ndarray))
array_filename = self.gt.laplacian(
self.bw, self.valid_output_filename)
self.assertTrue(isinstance(array_filename, np.ndarray))
class TestGaussianTopographyAspect(TestGaussianTopographyMethod):
"""Subclass of `TestGaussianTopographyMethod`
"""
def test_valid_filename(self):
"""Test write success with valid output filename
"""
self.gt.aspect(self.bw, self.valid_output_filename)
self.assertTrue(os.path.exists(self.valid_output_filename))
def test_no_filename(self):
"""Test valid return of array without an output filename
"""
array = self.gt.aspect(self.bw)
self.assertTrue(isinstance(array, np.ndarray))
def test_invalid_filename(self):
"""Test failure of output with invalid filename
"""
with self.assertRaises(AssertionError):
self.gt.aspect(self.bw, self.invalid_output_filename)
def test_image_returned(self):
"""Test an ndarray returned
"""
array_no_filename = self.gt.aspect(self.bw)
self.assertTrue(isinstance(array_no_filename, np.ndarray))
array_filename = self.gt.aspect(
self.bw, self.valid_output_filename)
self.assertTrue(isinstance(array_filename, np.ndarray))
```
|
{
"source": "jeffreywolf/pyday-2015",
"score": 4
}
|
#### File: jeffreywolf/pyday-2015/pi.py
```python
import argparse
import numpy as np
def getArgs():
parser = argparse.ArgumentParser(
description = "Simulate Pi"
)
parser.add_argument(
"-n",
type = int,
required = True,
help = "Number of random points to draw"
)
return parser.parse_args()
def dist(x,y):
z = np.sqrt((x)**2+(y)**2)
return z
def count_circle_pts(z):
pz = z<1.0
return sum(pz)
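# Monte Carlo estimate of pi: (x, y) is uniform on the 2 x 2 square [-1, 1] x [-1, 1],
# so a point lands inside the unit circle with probability pi/4; multiplying the
# observed in-circle fraction by 4 therefore estimates pi.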
def simulate_pi(n):
# square is 2 x 2
x = np.random.uniform(-1,1,n)
y = np.random.uniform(-1,1,n)
z = dist(x,y)
circle_pts = count_circle_pts(z)
return circle_pts/float(n)*4
def main():
args = getArgs()
result = simulate_pi(args.n)
print result
if __name__ == "__main__":
main()
```
|
{
"source": "jeffreywolf/threshold",
"score": 3
}
|
#### File: jeffreywolf/threshold/driver.py
```python
import argparse, ConfigParser, shlex, time, os, itertools
import subprocess as sp
import numpy as np
import multiprocessing as mp
def getArgs():
parser = argparse.ArgumentParser(
description = "Driver for threshold.py"
)
parser.add_argument(
"-c",
"--config",
required = True,
help = "location of a configuration file"
)
parser.add_argument(
"-v",
"--verbose",
action = "store_true",
help = "Print status while executing"
)
return parser.parse_args()
def getConfigs(configFile):
Configs = {}
try:
config = ConfigParser.ConfigParser()
config.read(configFile)
Configs["path"] = dict(config.items("path"))
print Configs
height=dict(config.items("height"))
print height
h_str=height["height"]
print h_str
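        # Accepted "height" formats: a comma list such as "1,3,5", a colon range such
        # as "2:10" (inclusive) or "2:10:2" (min:max:step), or a single integer.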
if ("," in h_str) and (":" in h_str):
raise Exception
elif "," in h_str:
h_list = h_str.split(",")
h_list = [int(L) for L in h_list]
h_list = np.array(h_list)
elif ":" in h_str:
h_range = h_str.split(":")
h_range = [int(L) for L in h_range]
if len(h_range)==2:
h_min, h_max = h_range[0],h_range[1] + 1
h_list = range(h_min, h_max)
elif len(h_range)==3:
h_min, h_max, step = h_range[0],h_range[1] + 1, h_range[2]
h_list = range(h_min, h_max, step)
else:
print "Problem with height configuration."
                raise ValueError("Height range must have 2 or 3 colon-separated values")
else:
h_list = np.array([int(h_str)])
Configs["height"] = h_list
except Exception as e:
print "Problem parsing configuration file {}. Check file.".format(configFile)
raise e
return Configs
def threshold(configs, args):
for h in configs["height"]:
if args.verbose:
cmd = "python {} -i {} -o {}/threshold{}.tif -t {} -v".format(
configs["path"]["threshold"],
configs["path"]["chm"],
configs["path"]["output"],
h,
h
)
print cmd
else:
cmd = "python {} -i {} -o {}/threshold{}.tif -t {}".format(
configs["path"]["threshold"],
configs["path"]["chm"],
configs["path"]["output"],
h,
h
)
cmd_args = shlex.split(cmd)
stdout,stderr = sp.Popen(
cmd_args,
stdin = sp.PIPE,
stdout = sp.PIPE,
stderr = sp.PIPE
).communicate()
if args.verbose:
print stdout, stderr
return True
def map_func(h, configs, args):
"""Polygons command line in parallel.
"""
if args.verbose:
cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp -v".format(
configs["path"]["polygons"],
configs["path"]["output"],
h,
configs["path"]["output"],
h
)
print cmd
else:
cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp".format(
configs["path"]["polygons"],
configs["path"]["output"],
h,
configs["path"]["output"],
h
)
cmd_args = shlex.split(cmd)
stdout,stderr = sp.Popen(
cmd_args,
stdin = sp.PIPE,
stdout = sp.PIPE,
stderr = sp.PIPE
).communicate()
if args.verbose:
print stdout, stderr
return True
def map_star_func(a_b):
return map_func(*a_b)
def main():
t_i = time.time()
args = getArgs()
configs = getConfigs(args.config)
threshold(configs, args)
cores = mp.cpu_count()
pool = mp.Pool(processes = cores)
pool.map(
map_star_func,
itertools.izip(
configs["height"],
itertools.repeat(configs),
itertools.repeat(args)
)
)
t_f = time.time()
if args.verbose:
print "Total elapsed time was {} minutes".format((t_f-t_i)/60.)
if __name__ == "__main__":
main()
```
#### File: jeffreywolf/threshold/threshold.py
```python
import gdal
from gdalconst import *
import numpy as np
import argparse
from osgeo import osr
def getArgs():
parser = argparse.ArgumentParser(
description = "Threshold an image. Values above the \
threshold are coded as 0 and below as 1."
)
parser.add_argument(
"-i",
"--input",
type = str,
required = True,
help = "input raster file"
)
parser.add_argument(
"-b",
"--band",
        type = int,
required = False,
help = "Band (indexing starts at 1). Default is band 1."
)
parser.add_argument(
"-t",
"--threshold",
type = str,
required = True,
help = "Threshold value."
)
parser.add_argument(
"-o",
"--output",
type = str,
required = True,
help = "Output file name"
)
parser.add_argument(
"-v",
"--verbose",
action = "store_true",
help = "Print status updates while executing"
)
return parser.parse_args()
def getGeoInfo(filename):
raster = gdal.Open(filename, GA_ReadOnly)
NDV = raster.GetRasterBand(1).GetNoDataValue()
xsize = raster.GetRasterBand(1).XSize
ysize = raster.GetRasterBand(1).YSize
GeoT = raster.GetGeoTransform()
Projection = osr.SpatialReference()
Projection.ImportFromWkt(raster.GetProjectionRef())
DataType = raster.GetRasterBand(1).DataType
DataType = gdal.GetDataTypeName(DataType)
return NDV, xsize, ysize, GeoT, Projection, DataType
def createGTiff(Name, Array, driver, NDV,
xsize, ysize, GeoT, Projection, DataType, band = 1):
if DataType == "Float32":
DataType = gdal.GDT_Float32
# set nans to original no data value
Array[np.isnan(Array)] = NDV
# Set data
band = 1
DataSet = driver.Create(Name, xsize, ysize, band, DataType)
DataSet.SetGeoTransform(GeoT)
DataSet.SetProjection(Projection.ExportToWkt())
# Write array
DataSet.GetRasterBand(1).WriteArray(Array)
DataSet.GetRasterBand(1).SetNoDataValue(NDV)
# Close DataSet
DataSet = None
return Name
def main():
args = getArgs()
if args.verbose:
print args
raster = gdal.Open(args.input)
NDV, xsize, ysize, GeoT, Projection, DataType = getGeoInfo(args.input)
if args.band:
band = raster.GetRasterBand(args.band)
else:
band = raster.GetRasterBand(1)
array = band.ReadAsArray()
#output_array = np.zeros(array.shape, array.dtype)+400+array
output_array = np.zeros(array.shape, array.dtype)
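    # Thresholding: pixels at or below the threshold become 1, pixels above it become 0,
    # and no-data pixels are carried through as NaN (restored to NDV in createGTiff).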
output_array[array == NDV] = np.nan
output_array[array!=NDV] = array[array!=NDV] <= float(args.threshold)
if args.verbose:
print output_array
driver = gdal.GetDriverByName('GTiff')
new_filename = createGTiff(args.output, output_array, driver, NDV,
xsize, ysize, GeoT, Projection, DataType)
if __name__ == "__main__":
main()
```
|
{
"source": "JeffreyXBao/TKN",
"score": 2
}
|
#### File: JeffreyXBao/TKN/tkn.py
```python
import torch
import torch.nn as nn
class TKN(nn.Module):
def __init__(self, encoder, decoder, src_pad_idx, trg_pad_idx, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.src_pad_idx = src_pad_idx
self.trg_pad_idx = trg_pad_idx
self.device = device
def make_pad_mask(self, matrix, pad):
#TODO: more efficient way?
batch_size = matrix.shape[0]
src_len = matrix.shape[1]
pad_mask = (matrix != pad).all(dim=2)
return torch.reshape(pad_mask, (batch_size, 1, 1, src_len))
def make_src_mask(self, src):
#src = [batch size, src len, vec_dim]
src_mask = self.make_pad_mask(src, self.src_pad_idx)
#src_mask = torch.ones((src_len, src_len), device=self.device).bool()
#src_mask = [batch size, 1, 1, src len]
return src_mask
def make_trg_mask(self, trg):
trg_len = trg.shape[1]
#trg = [batch size, trg len]
trg_pad_mask = self.make_pad_mask(trg, self.trg_pad_idx)
#trg_pad_mask = [batch size, 1, 1, trg len]
trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), device = self.device)).bool()
#trg_sub_mask = [trg len, trg len]
trg_mask = trg_pad_mask & trg_sub_mask
#trg_mask = [batch size, 1, trg len, trg len]
return trg_mask
def forward(self, src, trg):
#src = [batch size, src len]
#trg = [batch size, trg len]
src_mask = self.make_src_mask(src)
trg_mask = self.make_trg_mask(trg)
#src_mask = [batch size, 1, 1, src len]
#trg_mask = [batch size, 1, trg len, trg len]
enc_src = self.encoder(src, src_mask)
#enc_src = [batch size, src len, hid dim]
output = self.decoder(trg, enc_src, trg_mask, src_mask)
#output = [batch size, trg len, output dim]
#attention = [batch size, n heads, trg len, src len]
return output
class PsuedoEmbedding(nn.Module):
def __init__(self, input_dim, hid_dim, n_layers):
super().__init__()
self.fc1 = nn.Linear(input_dim, hid_dim)
self.layers = nn.ModuleList([nn.Linear(hid_dim, hid_dim) for _ in range(n_layers)])
def forward(self, inp):
inp = self.fc1(inp)
for layer in self.layers:
inp = layer(inp)
return inp
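# Note: unlike a standard Transformer, the model inputs are continuous vectors rather
# than token indices (nn.Embedding is commented out in the Encoder/Decoder below), so
# this "pseudo embedding" is a stack of linear layers and padding is detected by
# comparing whole input vectors against the pad value in make_pad_mask above.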
class Encoder(nn.Module):
def __init__(self, input_dim, hid_dim, n_layers, n_heads, pf_dim, dropout, device, max_length=100, penc_layers=1):
super().__init__()
self.device = device
#self.tok_embedding = nn.Embedding(input_dim, hid_dim)
self.psuedo_embedding = PsuedoEmbedding(input_dim, hid_dim, penc_layers)
self.pos_embedding = nn.Embedding(max_length, hid_dim)
self.layers = nn.ModuleList([EncoderLayer(hid_dim, n_heads, pf_dim, dropout, device) for _ in range(n_layers)])
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)
def forward(self, src, src_mask):
#src = [batch size, src len]
#src_mask = [batch size, src len]
batch_size = src.shape[0]
src_len = src.shape[1]
pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
#pos = [batch size, src len]
src = self.dropout((self.psuedo_embedding(src) * self.scale) + self.pos_embedding(pos))
#src = [batch size, src len, hid dim]
for layer in self.layers:
src = layer(src, src_mask)
#src = [batch size, src len, hid dim]
return src
class Decoder(nn.Module):
def __init__(self, output_dim, hid_dim, n_layers, n_heads, pf_dim, dropout, device, max_length=100, penc_layers=1, out_layers=3):
super().__init__()
self.device = device
#self.tok_embedding = nn.Embedding(output_dim, hid_dim)
self.psuedo_embedding = PsuedoEmbedding(output_dim, hid_dim, penc_layers)
self.pos_embedding = nn.Embedding(max_length, hid_dim)
self.layers = nn.ModuleList([DecoderLayer(hid_dim, n_heads, pf_dim, dropout, device) for _ in range(n_layers)])
#self.fc_out = nn.Linear(hid_dim, output_dim)
self.fc_out = OutLayer(hid_dim, output_dim, out_layers)
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)
def forward(self, trg, enc_src, trg_mask, src_mask):
#trg = [batch size, trg len]
#enc_src = [batch size, src len, hid dim]
#trg_mask = [batch size, trg len]
#src_mask = [batch size, src len]
batch_size = trg.shape[0]
trg_len = trg.shape[1]
pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
#pos = [batch size, trg len]
trg = self.dropout((self.psuedo_embedding(trg) * self.scale) + self.pos_embedding(pos))
#trg = [batch size, trg len, hid dim]
for layer in self.layers:
trg, att = layer(trg, enc_src, trg_mask, src_mask)
#trg = [batch size, trg len, hid dim]
#attention = [batch size, n heads, trg len, src len]
output = self.fc_out(trg)
#output = [batch size, trg len, output dim]
return output
class EncoderLayer(nn.Module):
def __init__(self,
hid_dim,
n_heads,
pf_dim,
dropout,
device):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
self.ff_layer_norm = nn.LayerNorm(hid_dim)
self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
pf_dim,
dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src, src_mask):
#src = [batch size, src len, hid dim]
#src_mask = [batch size, src len]
#self attention
_src, _ = self.self_attention(src, src, src, src_mask)
#dropout, residual connection and layer norm
src = self.self_attn_layer_norm(src + self.dropout(_src))
#src = [batch size, src len, hid dim]
#positionwise feedforward
_src = self.positionwise_feedforward(src)
#dropout, residual and layer norm
src = self.ff_layer_norm(src + self.dropout(_src))
#src = [batch size, src len, hid dim]
return src
class DecoderLayer(nn.Module):
def __init__(self,
hid_dim,
n_heads,
pf_dim,
dropout,
device):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
self.enc_attn_layer_norm = nn.LayerNorm(hid_dim)
self.ff_layer_norm = nn.LayerNorm(hid_dim)
self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
self.encoder_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
pf_dim,
dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, trg, enc_src, trg_mask, src_mask):
#trg = [batch size, trg len, hid dim]
#enc_src = [batch size, src len, hid dim]
#trg_mask = [batch size, trg len]
#src_mask = [batch size, src len]
#self attention
_trg, _ = self.self_attention(trg, trg, trg, trg_mask)
#dropout, residual connection and layer norm
trg = self.self_attn_layer_norm(trg + self.dropout(_trg))
#trg = [batch size, trg len, hid dim]
#encoder attention
_trg, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)
#dropout, residual connection and layer norm
trg = self.enc_attn_layer_norm(trg + self.dropout(_trg))
#trg = [batch size, trg len, hid dim]
#positionwise feedforward
_trg = self.positionwise_feedforward(trg)
#dropout, residual and layer norm
trg = self.ff_layer_norm(trg + self.dropout(_trg))
#trg = [batch size, trg len, hid dim]
#attention = [batch size, n heads, trg len, src len]
return trg, attention
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, hid_dim, n_heads, dropout, device, fc_layers=5):
super().__init__()
assert hid_dim % n_heads == 0
self.hid_dim = hid_dim
self.n_heads = n_heads
self.head_dim = hid_dim // n_heads
# self.fc_q = nn.Linear(hid_dim, hid_dim)
# self.fc_k = nn.Linear(hid_dim, hid_dim)
# self.fc_v = nn.Linear(hid_dim, hid_dim)
self.fc_q = OutLayer(hid_dim, hid_dim, fc_layers)
self.fc_k = OutLayer(hid_dim, hid_dim, fc_layers)
self.fc_v = OutLayer(hid_dim, hid_dim, fc_layers)
self.fc_o = nn.Linear(hid_dim, hid_dim)
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
def forward(self, query, key, value, mask = None):
batch_size = query.shape[0]
#query = [batch size, query len, hid dim]
#key = [batch size, key len, hid dim]
#value = [batch size, value len, hid dim]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
#Q = [batch size, query len, hid dim]
#K = [batch size, key len, hid dim]
#V = [batch size, value len, hid dim]
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
#Q = [batch size, n heads, query len, head dim]
#K = [batch size, n heads, key len, head dim]
#V = [batch size, n heads, value len, head dim]
energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
#energy = [batch size, n heads, query len, key len]
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e10)
attention = torch.softmax(energy, dim = -1)
#attention = [batch size, n heads, query len, key len]
x = torch.matmul(self.dropout(attention), V)
#x = [batch size, n heads, query len, head dim]
x = x.permute(0, 2, 1, 3).contiguous()
#x = [batch size, query len, n heads, head dim]
x = x.view(batch_size, -1, self.hid_dim)
#x = [batch size, query len, hid dim]
x = self.fc_o(x)
#x = [batch size, query len, hid dim]
return x, attention
class PositionwiseFeedforwardLayer(nn.Module):
def __init__(self, hid_dim, pf_dim, dropout):
super().__init__()
self.fc_1 = nn.Linear(hid_dim, pf_dim)
self.fc_2 = nn.Linear(pf_dim, hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
#x = [batch size, seq len, hid dim]
x = self.dropout(torch.relu(self.fc_1(x)))
#x = [batch size, seq len, pf dim]
x = self.fc_2(x)
#x = [batch size, seq len, hid dim]
return x
class OutLayer(nn.Module):
def __init__(self, in_dim, out_dim, hidden_layers):
super().__init__()
self.layers = nn.ModuleList([nn.Linear(in_dim, in_dim) for _ in range(hidden_layers)])
self.fc_final = nn.Linear(in_dim, out_dim)
def forward(self, x):
for layer in self.layers:
x = torch.relu(layer(x))
x = self.fc_final(x)
return x
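# --- Minimal smoke-test sketch (not part of the original repo; the dimensions and
# --- hyperparameters below are illustrative assumptions, not the author's settings).
if __name__ == "__main__":
    device = torch.device("cpu")
    INPUT_DIM, OUTPUT_DIM, HID_DIM = 16, 16, 64
    enc = Encoder(INPUT_DIM, HID_DIM, n_layers=2, n_heads=4, pf_dim=128, dropout=0.1, device=device)
    dec = Decoder(OUTPUT_DIM, HID_DIM, n_layers=2, n_heads=4, pf_dim=128, dropout=0.1, device=device)
    # Vectors equal to the pad value in every dimension are treated as padding.
    model = TKN(enc, dec, src_pad_idx=0.0, trg_pad_idx=0.0, device=device)
    src = torch.rand(2, 10, INPUT_DIM)   # [batch size, src len, vec dim]
    trg = torch.rand(2, 7, OUTPUT_DIM)   # [batch size, trg len, vec dim]
    out = model(src, trg)
    print(out.shape)  # expected: torch.Size([2, 7, 16])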
```
|
{
"source": "jeffrey-xiao/dotinstall",
"score": 2
}
|
#### File: dotinstall/installer/eopkg_installer.py
```python
import subprocess
from dotinstall.installer.installer import Installer
class EopkgInstaller(Installer):
def installer_exists(self):
return (
subprocess.call(
["which", "eopkg"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
== 0
)
def _is_installed(self, dependency):
eopkg_pipe = subprocess.Popen(
["sudo", "eopkg", "it", "-n", dependency],
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
_, error = eopkg_pipe.communicate()
return "already installed" in error.decode("utf-8")
def _install(self, dependency):
return (
subprocess.call(
["sudo", "eopkg", "it", "-y", dependency],
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
== 0
)
```
#### File: tests/integration/simple_test.py
```python
import io
import os
import pytest
from testing.util import execute_install
from testing.util import expand_path
from testing.util import in_resource_path
@pytest.fixture(autouse=True)
def config():
with in_resource_path("./testing/resources/simple"):
execute_install(False, False)
yield
def test_linking():
assert os.path.islink(expand_path("./other_dist/2.txt"))
def test_overwrite():
assert os.path.isfile(expand_path("./dist/1.txt"))
with io.open(expand_path("./dist/1.txt")) as fin:
assert fin.read().strip() == "1"
def test_prelink():
    assert os.path.isfile(expand_path("./dist/2.txt"))
with io.open(expand_path("./dist/2.txt")) as fin:
assert fin.read().strip() == "200"
def test_postlink():
assert not os.path.exists(expand_path("./dist/3.txt"))
def test_clean():
assert not os.path.exists(expand_path("./dist/broken1.txt"))
assert not os.path.islink(expand_path("./dist/broken1.txt"))
assert not os.path.exists(expand_path("./dist/broken2.txt"))
assert not os.path.islink(expand_path("./dist/broken2.txt"))
```
|
{
"source": "jeffreyyang3/Lines_Queueing",
"score": 2
}
|
#### File: jeffreyyang3/Lines_Queueing/config.1.py
```python
import random
import math
data = [[
{ # Type 1: double, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "double",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 2: swap, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "swap",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 3: bid, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "bid",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 4: swap, communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "swap",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": True,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 5: bid, communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "bid",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": True,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},{ # Type 1: double, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "double",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 2: swap, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "swap",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 3: bid, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "bid",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 4: swap, communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "swap",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": True,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 5: bid, communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "bid",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": True,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},{ # Type 1: double, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "double",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 2: swap, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "swap",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 3: bid, no communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "bid",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": False,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 4: swap, communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "swap",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": True,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
{ # Type 5: bid, communication, 8 players
#
"settings": {
"duration": 100,
"swap_method": "bid",
"pay_method": "gain",
"k": 0.8,
"service_distribution": 1,
"discrete": True,
"messaging": True,
},
"players": [
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
{"pay_rate": 4, "endowment": 4, "c": random.random()},
],
},
]]
def shuffle(data):
for i, group in enumerate(data):
for j, period in enumerate(group):
if "start_pos" not in data[i][j]["players"][0]:
positions = [n for n in range(1, len(period["players"]) + 1)]
random.shuffle(positions)
for k, player in enumerate(period["players"]):
data[i][j]["players"][k]["start_pos"] = positions[k]
random.shuffle(
data[i][j]["players"]
) # shuffle order of players within periods
random.shuffle(data[i]) # shuffle order of periods withing groups
random.shuffle(data) # shuffle order of groups
return data
# exports data to a csv format
def export_csv(fname, data):
pass
# exports data to models.py
# formats data to make it easier for models.py to parse it
def export_data():
# error handling & filling defaults
for i, group in enumerate(data):
for j, period in enumerate(group):
if "settings" not in period:
raise ValueError("Each period must contain settings dict")
if "players" not in period:
raise ValueError("Each period must contain players dict")
settings = period["settings"]
players = period["players"]
if "duration" not in settings:
raise ValueError("Each period settings must have a duration")
if "swap_method" not in settings:
raise ValueError(
"Each period settings must have a swap_method variable"
)
# For now, will comment out this swap_method check to allow for testing
# of the double auction
"""
if settings['swap_method'] not in ['cut', 'swap', 'bid']:
raise ValueError('Each period settings swap_method variable \
must be either \'bid\', \'swap\' or \'cut\'')
"""
if "pay_method" not in settings:
raise ValueError(
"Each period settings must have a pay_method variable")
if settings["pay_method"] not in ["gain", "lose"]:
raise ValueError(
"Each period settings pay_method variable \
must be either 'gain' or 'lose'"
)
if "pay_rate" not in players[0]:
raise ValueError("Players must have pay_rates")
if "service_time" not in players[0]:
if "k" not in settings:
raise ValueError(
"Period settings must have a k variable if players \
                    do not define service times"
)
if "service_distribution" not in settings:
data[i][j]["settings"]["service_distribution"] = 1
sd = settings["service_distribution"]
t = settings["duration"]
k = settings["k"]
vals = [random.randrange(sd) + 1 for p in players]
vals = [v / sum(vals) for v in vals]
vals = [round(v * k * t) for v in vals]
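                    # Service times: each player draws a random integer share in [1, sd];
                    # the shares are normalized to sum to 1 and scaled by k * duration,
                    # so total service time across players is roughly k times the duration.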
positions = [n for n in range(1, len(period["players"]) + 1)]
for k, _ in enumerate(players):
data[i][j]["players"][k]["service_time"] = vals[k]
data[i][j]["players"][k]["start_pos"] = positions[k]
print("exported data is")
print(data[0][0])
return data
```
|
{
"source": "jeffreyyoo/SICER2",
"score": 3
}
|
#### File: sicer/lib/associate_tags_with_regions.py
```python
import bisect
def tag_position(line, fragment_size):
shift = int(round(fragment_size / 2))
strand = line[5]
if strand == '+':
return int(line[1]) + shift
elif strand == '-':
return int(line[2]) - 1 - shift
def find_readcount_on_islands(island_start_list, island_end_list, tag_position):
"""
Make sure the islands are sorted.
Islands are non-overlapping!
Returns the index of the island on which the tag lands, or -1.
"""
index = bisect.bisect_right(island_start_list, tag_position);
if index - bisect.bisect_left(island_end_list, tag_position) == 1:
return index - 1;
else:
return -1;
```
#### File: sicer/lib/Utility.py
```python
import re, os, sys, shutil
from math import *
from string import *
def get_total_tag_counts(tag_bed_file):
"""
Get total tag counts given the current experimental run
file should be a bed file.
"""
counts =0;
infile = open(tag_bed_file,'r');
for line in infile:
""" check to make sure not a header line """
if not re.match("track", line):
counts += 1;
infile.close();
return counts;
def get_total_tag_counts_bed_graph(summary_graph_file, bed_val={}, threshold = 0):
"""
Get total tag counts given the current experimental run
file should be a summary bed graph file.
can be used for bed summary or islands file
"""
count = 0;
if summary_graph_file != "": #if summary graph exists
infile = open(summary_graph_file,'r');
for line in infile:
""" check to make sure not a header line """
if not re.match("track", line):
line = line.strip();
sline = line.split();
assert ( len(sline) == 4 );
value = float(sline[3]);
if (value >= threshold):
count += value;
infile.close();
elif len(bed_val)>0:
for chrom in bed_val.keys():
for item in bed_val[chrom]:
if (item.value >= threshold):
count +=item.value;
else:
print ("wrong input!!");
return count;
def fileExists(f):
try:
file = open(f)
except IOError:
exists = 0
else:
exists = 1
return exists
def is_bed_sorted(list):
"""
Check if sorted in ascending order.
input is a list of BED with chrom, start, end and value.
output: sorted =1 or 0
"""
sorted = 1;
for index in range(0, len(list)-1):
if list[index].start > list[index+1].start:
sorted = 0;
return sorted;
def is_list_sorted(list):
"""
Check if sorted in ascending order.
input is a list of values.
output: sorted =1 or 0
"""
sorted = 1;
for index in range(0, len(list)-1):
if list[index] > list[index+1]:
sorted = 0;
return sorted;
def rescale_a_column(input_file, c, rescale_factor, output_file):
"""
c is the 0-based column number
Return a list of names
"""
infile = open(input_file,'r')
outfile = open(output_file, 'w')
for line in infile:
if not re.match("#", line):
line = line.strip()
sline = line.split()
if (len(sline)>0):
                new_value = float(sline[c]) * rescale_factor;
sline[c] = str(new_value);
outfile.write('\t'.join(sline)+'\n')
infile.close()
outfile.close()
def normalize_a_column(input_file, c, output_file):
"""
c is the 0-based column number
Return a list of names
"""
line_number = 0;
infile = open(input_file,'r')
outfile = open(output_file, 'w')
for line in infile:
if not re.match("#", line):
line = line.strip()
sline = line.split()
if (len(sline)>0):
if (line_number == 0):
                    rescale_factor = float(sline[c])
                new_value = float(sline[c]) / rescale_factor;
sline[c] = str(new_value);
outfile.write('\t'.join(sline)+'\n')
line_number += 1;
infile.close()
outfile.close()
def add_column (infile, c, outfile, const=-1):
"""
c is the 0-based column number
add a column to the original file
default value would be the line number
"""
file = open(infile,'r')
ofile = open(outfile, 'w')
counter = 0;
for line in file:
if not re.match("#", line):
counter += 1
line = line.strip()
sline = line.split()
if const == -1:
sline.insert(c, "L" + str(counter));
else:
sline.insert(c, str(const));
line = '\t '.join(sline) + '\n';
ofile.write(line);
file.close()
ofile.close()
def extract_two_columns(gene_file, c1, c2, outfile):
"""
c is the 0-based column number
"""
maxi = max (c1, c2);
mini = min (c1, c2);
file = open(gene_file,'r')
ofile = open(outfile, 'w')
for line in file:
line = line.strip()
sline = line.split()
if len(sline)> maxi:
outline = sline[c1] + '\t' + sline[c2] + '\n';
elif len(sline)>mini:
outline = sline[mini]+ '\n';
ofile.write(outline);
ofile.close();
file.close();
```
#### File: sicer/main/run_RECOGNICER.py
```python
import os
import shutil
import tempfile
import sys
import multiprocessing as mp
curr_path = os.getcwd()
# From SICER Package
from sicer.lib import GenomeData
from sicer.src import remove_redundant_reads
from sicer.src import run_make_graph_file_by_chrom
from sicer.src import coarsegraining
from sicer.src import associate_tags_with_chip_and_control_w_fc_q
from sicer.src import filter_islands_by_significance
from sicer.src import make_normalized_wig
from sicer.src import filter_raw_tags_by_islands
def main(args, df_run=False): # df_run indicates if run_RECOGNICER is being called by run_RECOGNICER_df function.
# Checks if there is a control library
control_lib_exists = True
if (args.control_file is None):
control_lib_exists = False
try:
temp_dir = tempfile.mkdtemp()
# Change current working directory to temp_dir
os.chdir(temp_dir)
except:
sys.exit(
"Temporary directory required for SICER cannot be created. Check if directories can be created in %s." % curr_path)
try:
# Step 0: create Pool object for parallel-Processing
num_chroms = len(GenomeData.species_chroms[args.species])
pool = mp.Pool(processes=min(args.cpu, num_chroms))
# Step 1: Remove redundancy reads in input file according to input threshold
treatment_file_name = os.path.basename(args.treatment_file)
print("Preprocess the", treatment_file_name, "file to remove redundancy with threshold of",
args.redundancy_threshold, "\n")
total_treatment_read_count = remove_redundant_reads.main(args, args.treatment_file, pool)
args.treatment_file = treatment_file_name
print('\n')
# Step 2: Remove redundancy reads in control library according to input threshold
if (control_lib_exists):
control_file_name = os.path.basename(args.control_file)
print("Preprocess the", control_file_name, "file to remove redundancy with threshold of",
args.redundancy_threshold, "\n")
total_control_read_count = remove_redundant_reads.main(args, args.control_file, pool)
args.control_file = control_file_name
print('\n')
        # Step 3: Partition the genome into windows and generate graph files for each chromosome
print("Partitioning the genome in windows and generate summary files... \n")
total_tag_in_windows = run_make_graph_file_by_chrom.main(args, pool)
print("\n")
# Step4+5: Normalize and generate WIG file
print("Normalizing graphs by total island filitered reads per million and generating summary WIG file...\n")
output_WIG_name = (treatment_file_name.replace('.bed', '') + "-W" + str(args.window_size) + "-normalized.wig")
make_normalized_wig.main(args, output_WIG_name, pool)
        # Step 6: Find candidate islands exhibiting clustering
print("Finding candidate islands exhibiting clustering... \n")
coarsegraining.main(args, total_tag_in_windows, pool)
print("\n")
# Running SICER with a control library
if (control_lib_exists):
# Step 7
print("Calculating significance of candidate islands using the control library... \n")
associate_tags_with_chip_and_control_w_fc_q.main(args, total_treatment_read_count, total_control_read_count, pool)
# Step 8:
print("Identifying significant islands using FDR criterion...")
            significant_read_count = filter_islands_by_significance.main(args, 7, pool) # 7 represents the n-th column we want to filter by
print("Out of the ", total_treatment_read_count, " reads in ", treatment_file_name, ", ",
significant_read_count, " reads are in significant islands")
# Optional Outputs
if (args.significant_reads):
# Step 9: Filter treatment reads by the significant islands found from step 8
print("Filter reads with identified significant islands...\n")
filter_raw_tags_by_islands.main(args, pool)
# Step 10: Produce graph file based on the filtered reads from step 9
print("Make summary graph with filtered reads...\n")
run_make_graph_file_by_chrom.main(args, pool, True)
# Step 11: Produce Normalized WIG file
print("\nNormalizing graphs by total island filitered reads per million and generating summary WIG file \n")
output_WIG_name = (treatment_file_name.replace('.bed', '') + "-W" + str(args.window_size) + "-FDR" + str(
args.false_discovery_rate) + "-islandfiltered-normalized.wig")
make_normalized_wig.main(args, output_WIG_name, pool)
pool.close()
pool.join()
# Final Step
if (df_run == True):
return temp_dir, total_treatment_read_count
else:
print("End of SICER")
finally:
if df_run==False:
print("Removing temporary directory and all files in it.")
shutil.rmtree(temp_dir)
```
#### File: sicer/src/find_islands_in_pr.py
```python
import multiprocessing as mp
import os
from functools import partial
from math import *
import numpy as np
from sicer.lib import Background_island_probscore_statistics
from sicer.lib import GenomeData
"""
Take in coords for bed_graph type summary files and find 'islands' of modifications.
There are a number of options here that can be turned on or off depending on need.
Right now:
(1) scan for all 'islands' made up of a single window or consecutive
windows
(2) remove all single window 'islands' -- this can be commented out
when looking for more localized signals (such as TF binding sites?)
(3) try to combine islands that are within gap distance of each other.
This gap distance is supplemented by a window_buffer just so we don't
do anything stupid with window sizes
(4) Remove all single window combined islands -- if step (2) was done,
this is redundant
(5) Lastly, filter out all the islands we've found that have a total
score < islands_minimum_tags
"""
# Factorial
def fact(m):
value = 1.0;
if m != 0:
while m != 1:
value = value * m;
m = m - 1;
return value;
# Return the log of a factorial, using Srinivasa Ramanujan's approximation when m>=20
def factln(m):
if m < 20:
value = 1.0;
if m != 0:
while m != 1:
value = value * m;
m = m - 1;
return log(value);
else:
return m * log(m) - m + log(m * (1 + 4 * m * (1 + 2 * m))) / 6.0 + log(pi) / 2;
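# The m >= 20 branch above is Ramanujan's approximation of the log-factorial:
#   ln(m!) ~ m*ln(m) - m + ln(m*(1 + 4m*(1 + 2m)))/6 + ln(pi)/2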
def poisson(i, average):
if i < 20:
return exp(-average) * average ** i / fact(i);
else:
exponent = -average + i * log(average) - factln(i);
return exp(exponent);
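# For large counts the Poisson pmf exp(-lambda) * lambda^i / i! would overflow the
# factorial, so it is evaluated in log space as exp(-lambda + i*ln(lambda) - factln(i)).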
def combine_proximal_islands(islands, gap, window_size_buffer=3):
"""
islands: a list of tuples of following format: (chrom, start, end, score)
Therefore, "islands[index][1]" would mean the start position of the window at the given index
Extend the regions found in the find_continuous_region function.
If gap is not allowed, gap = 0, if one window is allowed, gap = window_size (200)
Return a list of combined regions.
"""
proximal_island_dist = gap + window_size_buffer;
final_islands = []
current_island = islands[0];
if len(islands) == 1:
final_islands = islands;
else:
for index in range(1, len(islands)):
dist = islands[index][1] - current_island[2];
if dist <= proximal_island_dist:
current_island[2] = islands[index][2];
current_island[3] += islands[index][3];
else:
final_islands.append(current_island);
current_island = islands[index];
# The last island:
final_islands.append(current_island);
return final_islands;
def find_region_above_threshold(island_list, score_threshold):
filtered_islands = [];
for island in island_list:
if island[3] >= (score_threshold - .0000000001):
filtered_islands.append(island);
return filtered_islands;
def filter_ineligible_windows(chrom_graph, min_tags_in_window, average):
'''Filters windows that have tag count lower than the minimum threshold count and calculates score for windows that meet the minimum count.
Score is defined as s = -log(Poisson(read_count,lambda))'''
filtered_chrom_graph = []
for window in chrom_graph:
read_count = window[3]
score = -1
if (read_count >= min_tags_in_window):
prob = poisson(read_count, average);
if prob < 1e-250:
score = 1000; # outside of the scale, take an arbitrary number.
else:
score = -log(prob)
eligible_window = (window[0], window[1], window[2], score)
if score > 0:
filtered_chrom_graph.append(eligible_window);
np_filtered_chrom_graph = np.array(filtered_chrom_graph, dtype=object)
return np_filtered_chrom_graph
def filter_and_find_islands(min_tags_in_window, gap_size, score_threshold, average, verbose, graph_file):
'''Function for handling multiprocessing. Calls functions for filtering windows and finding islands.'''
number_of_islands = 0
print_return = ""
chrom_graph = np.load(graph_file, allow_pickle=True)
if (len(chrom_graph) > 0):
chrom = chrom_graph[0][0]
filtered_chrom_graph = filter_ineligible_windows(chrom_graph, min_tags_in_window, average)
islands = combine_proximal_islands(filtered_chrom_graph, gap_size, 2);
islands = find_region_above_threshold(islands, score_threshold);
number_of_islands += len(islands)
if not (len(islands) > 0):
if verbose:
print_return += chrom + " does not have any islands meeting the required significance"
np.save(graph_file, islands)
return (graph_file, number_of_islands, print_return)
def main(args, total_read_count, pool):
print("Species: ", args.species);
print("Window_size: ", args.window_size);
print("Gap size: ", args.gap_size);
print("E value is:", args.e_value);
print("Total read count:", total_read_count)
chroms = GenomeData.species_chroms[
        args.species]; # list of chromosomes for the given species (e.g. chr1, chr2, ... , chrx)
    genome_length = sum(GenomeData.species_chrom_lengths[args.species].values()); # total length of all chromosomes
effective_genome_length = int(args.effective_genome_fraction * genome_length);
average = float(total_read_count) * args.window_size / effective_genome_length; # average read count
print("Genome Length: ", genome_length);
print("Effective genome Length: ", effective_genome_length);
print("Window average:", average);
window_pvalue = 0.20;
bin_size = 0.001;
background = Background_island_probscore_statistics.Background_island_probscore_statistics(total_read_count,
args.window_size,
args.gap_size,
window_pvalue,
effective_genome_length,
bin_size);
min_tags_in_window = background.min_tags_in_window
print("Window pvalue:", window_pvalue)
print("Minimum num of tags in a qualified window: ", min_tags_in_window) # first threshold cutoff
print("\nDetermining the score threshold from random background...");
# determine threshold from random background
score_threshold = background.find_island_threshold(args.e_value);
print("The score threshold is:", score_threshold);
# generate the probscore summary graph file, only care about enrichment
# filter the summary graph to get rid of windows whose scores are less than window_score_threshold
file = args.treatment_file.replace('.bed', '')
list_of_graph_files = []
for chrom in chroms:
list_of_graph_files.append(file + '_' + chrom + '_graph.npy')
# Use multiprocessing to filter windows with tag count below minimum requirement
print(
"Generating the enriched probscore summary graph and filtering the summary graph to eliminate ineligible windows... ");
#pool = mp.Pool(processes=min(args.cpu, len(chroms)))
filter_and_find_islands_partial = partial(filter_and_find_islands, min_tags_in_window, args.gap_size,
score_threshold, average, args.verbose)
filtered_islands_result = pool.map(filter_and_find_islands_partial, list_of_graph_files)
#pool.close()
file_name = args.treatment_file.replace('.bed', '')
outfile_path = os.path.join(args.output_directory, (file_name + '-W' + str(args.window_size)
+ '-G' + str(args.gap_size) + '.scoreisland'))
total_number_islands = 0
path_to_filtered_graph = []
with open(outfile_path, 'w') as outfile:
for i in range(0, len(filtered_islands_result)):
filtered_chrom_graph = np.load(filtered_islands_result[i][0],allow_pickle=True)
path_to_filtered_graph.append(filtered_islands_result[i][0])
total_number_islands += filtered_islands_result[i][1]
if (filtered_islands_result[i][2] != ""):
print(filtered_islands_result[i][2])
for window in filtered_chrom_graph:
chrom = window[0]
line = (chrom + '\t' + str(window[1]) + '\t' + str(window[2])
+ '\t' + str(window[3]) + '\n')
outfile.write(line)
print("Total number of islands: ", total_number_islands);
```
|
{
"source": "jeffreyyzwu/pythonTools",
"score": 3
}
|
#### File: pythonTools/dpFreeDine/request.py
```python
import urllib.request
import random
import time
from log import logger
def randomSleep():
stime = round(random.uniform(3,7), 3)
logger.info('random sleep:{0}s'.format(stime))
time.sleep(stime)
def randomUserAgent():
agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36'
]
index = random.randint(0, len(agents) - 1)
return agents[index]
def setHeader(config):
headers = config["headers"]
headers["User-Agent"] = randomUserAgent()
defaultHeaders = {
"Pragma": "no-cache",
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8;",
"Accept": "application/json",
"X-Requested-With": "XMLHttpRequest",
'Cookie': "dper={0}".format(config["token"])
}
defaultHeaders.update(headers)
config["headers"] = defaultHeaders
def openUrl(url, config, data):
randomSleep()
setHeader(config)
if (len(config["proxy"]) > 0):
proxy = urllib.request.ProxyHandler({'http': config["proxy"]})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener)
req = urllib.request.Request(
url=url,
data=urllib.parse.urlencode(data).encode("utf-8"),
headers=config["headers"]
)
response = urllib.request.urlopen(req)
return response
```
#### File: pythonTools/jdtry/jdlogin.py
```python
import time
import json
import config
from bs4 import BeautifulSoup
from log import logger
from selenium import webdriver
# chromedriver download address
# https://chromedriver.storage.googleapis.com/index.html
class jdlogin:
def __init__(self):
self.users = config.getUsers()
def _login_account_token(self, user):
token = ''
option = webdriver.ChromeOptions()
# Incognito mode
option.add_argument('--incognito')
# option.add_argument('--headless')
# Important: enable developer mode so that sites cannot detect that Selenium is being used
option.add_experimental_option('excludeSwitches', ['enable-automation'])
browser = webdriver.Chrome(chrome_options=option, executable_path=r'./chromedriver')
try:
browser.get('https://passport.jd.com/new/login.aspx')
accountbutton = browser.find_element_by_css_selector(
'div.login-tab.login-tab-r a')
accountbutton.click()
# time.sleep(3)
uinput = browser.find_element_by_id('loginname')
uinput.send_keys(user["phone"])
time.sleep(2)
pinput = browser.find_element_by_id('nloginpwd')
pinput.send_keys(user['password'])
time.sleep(1)
button = browser.find_element_by_id('loginsubmit')
button.click()
time.sleep(15)
for cookie in browser.get_cookies():
print(cookie)
# print('name='+cookie["name"])
# print('value='+cookie["value"])
if ("thor" == cookie["name"] and ".jd.com" == cookie["domain"]):
token = cookie["value"]
finally:
browser.close()
return token
def _get_current_time(self):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def refresh_token(self):
if (len(self.users) == 0):
logger.info("获取用户信息失败")
return
for user in self.users:
token = user["token"]
if (len(token) >= 0):
time.sleep(3)
token = self._login_account_token(user)
if (len(token) == 0):
logger.error('Failed to get token for user [{0}]'.format(user["phone"]))
return
user["token"] = token
user["time"] = self._get_current_time()
logger.info(user)
self._save_user_profile(user)
logger.info('Got token for user [{0}], token: [{1}]'.format(user["phone"], user["token"]))
def _save_user_profile(self, user):
phone = user["phone"]
logger.info("------保存更新token到users配置文件中--------")
try:
with open('conf/users.json', mode="r", encoding='utf-8-sig') as json_read_file:
usersConfig = json.load(json_read_file)
for config in usersConfig:
if (config["phone"] == user["phone"]):
config["token"] = user["token"]
if (user.__contains__("time")):
config["time"] = user["time"]
break
with open('conf/users.json', mode="w", encoding='utf-8-sig') as json_write_file:
content = json.dumps(usersConfig, ensure_ascii=False, indent=4)
json_write_file.write(content)
json_write_file.close()
json_read_file.close()
except Exception as ex:
logger.error(ex)
if __name__ == '__main__':
login = jdlogin()
login.refresh_token()
```
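refresh_token and _save_user_profile assume a conf/users.json file whose entries carry phone, password, token and time fields. A hedged sketch of what such a file could look like, written out with the same json settings the code uses (all values are placeholders):
```python
# Hypothetical conf/users.json contents, inferred from the fields read/written above.
import json

users = [
    {
        "phone": "13800000000",         # login account (placeholder)
        "password": "example-password",
        "token": "",                    # filled in by refresh_token()
        "time": ""                      # last refresh timestamp
    }
]

with open("conf/users.json", mode="w", encoding="utf-8-sig") as f:
    f.write(json.dumps(users, ensure_ascii=False, indent=4))
```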
#### File: pythonTools/jdtry/removefollow.py
```python
import json
import time
import request
from log import logger
import config
def getFollowList(user):
url = 'https://t.jd.com/follow/vender/qryCategories.do?qryKey=&_={0}'.format(time.time())
follows = []
setHeader(user)
try:
response = request.openUrl(url, user, {})
content = str(response.read(), 'utf-8')
decodeContent = json.loads(content)
if decodeContent.__contains__("data"):
# logger.info(decodeContent)
for category in decodeContent.get("data",[]):
for entry in category["entityIdSet"]:
follows.append(entry)
else:
# logger.error(decodeContent)
if (decodeContent.get("error","") == "NotLogin"):
user["token"] = ""
config.saveUserConfig(user)
logger.info("clear token and save to config file")
except Exception as ex:
logger.error("账户:{0}获取店铺关注列表报错, url:{1},错误信息:{2}".format(user["phone"], url, ex))
logger.info("账户:{0}关注店铺总数:{1}, 具体如下:{2}".format(user["phone"],len(follows), follows))
return follows
def setHeader(user):
user["headers"].update({
"Host": "t.jd.com",
"Referer": "https://t.jd.com/vender/followVenderList.action"
})
def removeFollow(user, vendor):
url = 'https://t.jd.com/follow/vender/unfollow.do?venderId={0}&_={1}'.format(vendor, time.time())
setHeader(user)
try:
response = request.openUrl(url, user, {})
content = str(response.read(), 'utf-8')
# decodeContent = json.loads(content)
logger.info("账户:{0}取消店铺{1}关注, 结果信息:{2}".format(user["phone"], vendor, content))
except Exception as ex:
logger.error("账户:{0}取消店铺关注列表报错, url:{1}, 错误信息:{2}".format(user["phone"], url, ex))
def remove(user):
while True:
follows = getFollowList(user)
if (follows and len(follows) > 100):
for follow in follows:
removeFollow(user, follow)
else:
break
```
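getFollowList expects qryCategories.do to answer with a data list of categories, each holding an entityIdSet of vendor ids, or an error field such as NotLogin when the token has expired. A sketch of the payload shapes the parser above handles (field names come from the code; the ids are invented):
```python
# Example payloads shaped the way getFollowList() parses them (hypothetical values).
logged_in_response = {
    "data": [
        {"entityIdSet": [1000012345, 1000067890]},
        {"entityIdSet": [1000011111]},
    ]
}

expired_token_response = {
    "error": "NotLogin"  # triggers clearing user["token"] and saving the config
}
```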
#### File: pythonTools/meituantry/login.py
```python
from log import logger
import config
def saveUserConfig(user):
config.saveUserConfig(user)
def refreshToken(user):
logger.info("-----更新token-------")
token = ''
logger.info(user)
if ((not token) or len(token) == 0):
logger.info('user:{0} login fail'.format(user["phone"]))
else:
saveUserConfig(user)
logger.info('user:{0} login success, token:{1}'.format(
user["phone"], token))
return token
def getToken(user):
token = ''
if (user.__contains__('token')):
token = user["token"]
if ((not token) or len(token) == 0):
token = refreshToken(user)
return token
```
#### File: pythonTools/meituantry/main.py
```python
import config
import login
import freebeautytry
from log import logger
def main():
logger.info('----------------------------Starting the run--------------------------------')
users = config.getUsers()
for user in users:
token = login.getToken(user)
if (token and len(token) > 0):
logger.info('--------------Switched to user {0}, starting sign-ups--------------'.format(user["phone"]))
freebeautytry.fetchFreeBeautyTry(user)
logger.info('--------------User {0}: sign-ups finished--------------'.format(user["phone"]))
config.checkToken()
logger.info('--------All sign-ups finished-----------')
if __name__ == '__main__':
main()
```
#### File: pythonTools/smzdm/checkin.py
```python
from log import logger
import request
from urllib.parse import unquote
import json
def setQueryHeader(config):
config.update({
"headers": {
"Host": "zhiyou.smzdm.com",
"Referer": "https://www.smzdm.com/",
"DNT": "1",
"Accept-Encoding": "deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Accept": "*/*",
"Connection": "keep-alive",
"Cookie": config["token"]
}})
def checkin(config):
setQueryHeader(config)
logger.info(config)
url = 'https://zhiyou.smzdm.com/user/checkin/jsonp_checkin'
response = request.openUrl(url, config, {})
content = response.read().decode('utf-8')
decodeContent = json.loads(content)
logger.info("张大妈签到结果:{0}".format(decodeContent))
```
|
{
"source": "JeffreyZh4ng/automated-trading",
"score": 3
}
|
#### File: automated-trading/AutomatedTrading/CommandTools.py
```python
class CommandTools:
@staticmethod
def get_token():
""" Simple helper that will return the token stored in the text file.
:return: Your Robinhood API token
"""
with open('robinhood_token.txt') as robinhood_token_file:
    current_token = robinhood_token_file.read()
return current_token
```
|
{
"source": "jeffreyzhang2012/rrc_package",
"score": 2
}
|
#### File: rrc_package/python/pybullet_utils.py
```python
import pybullet
import numpy as np
from trifinger_simulation.tasks import move_cube
MAX_DIST = move_cube._max_cube_com_distance_to_center
#DIST_THRESH = move_cube._CUBE_WIDTH / 5
ORI_THRESH = np.pi / 8
REW_BONUS = 1
POS_SCALE = np.array([0.128, 0.134, 0.203, 0.128, 0.134, 0.203, 0.128, 0.134,
0.203])
def reset_camera():
camera_pos = (0.,0.2,-0.2)
camera_dist = 1.0
pitch = -45.
yaw = 0.
if pybullet.isConnected() != 0:
pybullet.resetDebugVisualizerCamera(cameraDistance=camera_dist,
cameraYaw=yaw,
cameraPitch=pitch,
cameraTargetPosition=camera_pos)
```
#### File: python/traj_opt/fixed_contact_point_system.py
```python
import numpy as np
from casadi import *
import pybullet
from rrc_iprl_package.traj_opt import utils
class FixedContactPointSystem:
def __init__(self,
nGrid = 100,
dt = 0.1,
fnum = 3,
cp_params = None,
obj_shape = None,
obj_mass = None,
log_file = None,
):
# Time parameters
self.nGrid = nGrid
self.dt = dt
self.tf = dt * (nGrid-1) # Final time
self.fnum = fnum
self.qnum = 3
self.obj_dof = 6
self.x_dim = 7 # Dimension of object pose
self.dx_dim = 6 # Dimension of object twist
self.p = 100
self.obj_shape = obj_shape # (width, length, height), (x, y, z)
self.obj_mass = obj_mass
self.obj_mu = 1
self.gravity = -10
self.cp_params = cp_params
self.cp_list = self.get_contact_points_from_cp_params(self.cp_params)
# Contact model force selection matrix
l_i = 3
self.l_i = l_i
H_i = np.array([
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
#[0, 0, 0, 1, 0, 0],
])
self.H = np.zeros((l_i*self.fnum,self.obj_dof*self.fnum))
for i in range(self.fnum):
self.H[i*l_i:i*l_i+l_i, i*self.obj_dof:i*self.obj_dof+self.obj_dof] = H_i
self.log_file = log_file
################################################################################
# Decision variable management helper functions
################################################################################
"""
Define decision variables
t : time
s_flat: state [x, dx] (flattened vector)
l_flat: contact forces
a : slack variables for the goal constraint
"""
def dec_vars(self):
x_dim = self.x_dim
dx_dim = self.dx_dim
qnum = self.qnum
fnum = self.fnum
nGrid = self.nGrid
# time
t = SX.sym("t" ,nGrid)
# object pose at every timestep
# one row of x is [x, y, z, qw, qx, qy, qz]
x = SX.sym("x" ,nGrid,x_dim)
# object velocity at every timestep
# one row of dx is the 6-dim object twist [vx, vy, vz, wx, wy, wz]
dx = SX.sym("dx",nGrid,dx_dim)
# Lamda (applied contact forces) at every timestep
# one row of l is [normal_force_f1, tangent_force_f1, ..., normal_force_fn, tangent_force_fn]
l = SX.sym("l" ,nGrid,fnum*self.l_i)
# Slack variables for x_goal
a = SX.sym("a", x_dim)
# Flatten vectors
s_flat = self.s_pack(x,dx)
l_flat = self.l_pack(l)
return t,s_flat,l_flat, a
"""
Pack the decision variables into a single horizontal vector
"""
def decvar_pack(self,t,s,l,a):
z = vertcat(t,s,l,a)
return z
"""
Unpack the decision variable vector z into:
t: times (nGrid x 1) vector
s: packed state vector
l: packed l vector (contact forces)
a: slack variables
"""
def decvar_unpack(self,z):
qnum = self.qnum
fnum = self.fnum
nGrid = self.nGrid
x_dim = self.x_dim
dx_dim = self.dx_dim
t = z[:nGrid]
s_start_ind = nGrid
s_end_ind = s_start_ind + nGrid*x_dim + nGrid*dx_dim
s_flat = z[s_start_ind:s_end_ind]
l_start_ind = s_end_ind
l_end_ind = l_start_ind + nGrid * fnum * self.l_i
l_flat = z[l_start_ind:l_end_ind]
a_start_ind = l_end_ind
a = z[a_start_ind:]
return t,s_flat,l_flat,a
"""
Unpack the state vector s
x: object pose [position, quaternion] (nGrid x 7)
dx: object twist [linear velocity, angular velocity] (nGrid x 6)
"""
def s_unpack(self,s):
nGrid = self.nGrid
x_dim = self.x_dim
dx_dim = self.dx_dim
# Get object pose
x_flat = s[:nGrid*x_dim]
x = reshape(x_flat,x_dim,nGrid).T
# Get object twist
dx_flat = s[nGrid*x_dim:]
dx = reshape(dx_flat,dx_dim,nGrid).T
return x,dx
"""
Pack the state vector s into a single horizontal vector
State:
x: (px, py, pz, qx, qy, qz, qw) pose of object
dx: 6-dim object twist (linear and angular velocity)
"""
def s_pack(self,x,dx):
nGrid = self.nGrid
x_dim = self.x_dim
dx_dim = self.dx_dim
x_flat = reshape(x.T,nGrid*x_dim,1)
dx_flat = reshape(dx.T,nGrid*dx_dim,1)
return vertcat(x_flat,dx_flat)
"""
Pack the l vector into single horizontal vector
"""
def l_pack(self,l):
nGrid = self.nGrid
fnum = self.fnum
l_flat = reshape(l.T,nGrid*fnum*self.l_i,1)
return l_flat
"""
Unpack the flat l vector into a (nGrid x fnum*l_i) array
"""
def l_unpack(self,l_flat):
nGrid = self.nGrid
fnum = self.fnum
l = reshape(l_flat,self.l_i*fnum,nGrid).T
return l
################################################################################
# End of decision variable help functions
################################################################################
################################################################################
# Constraint functions
################################################################################
"""
Compute system dynamics (ds/dt):
s_flat: state vector
l_flat: contact forces
Return:
Derivative of state, ds, as a flattened vector with same dimension as s_flat
"""
def dynamics(self, s_flat, l_flat):
# Unpack variables
x, dx = self.s_unpack(s_flat)
l = self.l_unpack(l_flat)
new_dx_list = []
ddx_list = []
for t_ind in range(self.nGrid):
x_i = x[t_ind, :]
dx_i = dx[t_ind, :]
# Compute dx at each collocation point
# dx is a (7x1) vector
new_dx_i = SX.zeros((7,1))
# First 3 elements are position time-derivatives
new_dx_i[0:3, :] = dx_i[0, 0:3]
# Last 4 elements are quaternion time-derivatives
## Transform angular velocities dx into quaternion time-derivatives
quat_i = x_i[0, 3:]
dquat_i = 0.5 * self.get_dx_to_dquat_matrix(quat_i) @ dx_i[0, 3:].T
new_dx_i[3:, :] = dquat_i
new_dx_list.append(new_dx_i)
# Compute ddx at each collocation point
Mo = self.get_M_obj()
G = self.get_grasp_matrix(x_i)
gapp = self.get_gapp()
l_i = l[t_ind, :].T
#print("t_ind: {}".format(t_ind))
#print(x_i)
#print(l_i)
#print(inv(Mo))
#print("gapp: {}".format(gapp))
#print(G.shape)
#print((gapp + G@l_i).shape)
ddx_i = inv(Mo) @ (gapp + G @ l_i)
ddx_list.append(ddx_i)
new_dx = horzcat(*new_dx_list).T
ddx = horzcat(*ddx_list).T
ds = self.s_pack(new_dx, ddx)
return ds
"""
Get matrix to transform angular velocity to quaternion time derivative
Input:
quat: [qx, qy, qz, qw]
"""
def get_dx_to_dquat_matrix(self, quat):
qx = quat[0]
qy = quat[1]
qz = quat[2]
qw = quat[3]
M = np.array([
[-qx, -qy, -qz],
[qw, qz, -qy],
[-qz, qw, qx],
[qy, -qx, qw],
])
return SX(M)
"""
Linearized friction cone constraint
Approximate cone as an inner pyramid
Handles absolute values by considering positive and negative bound as two constraints
Return:
f_constraints: (nGrid*fnum*2*2)x1 vector with friction cone constraints
where nGrid*fnum element corresponds to constraints of finger fnum at time nGrid
Every element in f_constraints must be >= 0 (lower bound 0, upper bound np.inf)
"""
def friction_cone_constraints(self,l_flat):
l = self.l_unpack(l_flat)
# Positive bound
f1_constraints = SX.zeros((self.nGrid, self.fnum*2))
# Negative bound
f2_constraints = SX.zeros((self.nGrid, self.fnum*2))
mu = np.sqrt(2) * self.obj_mu # Inner approximation of cone
for col in range(self.fnum):
# abs(fy) <= mu * fx
f1_constraints[:,2*col] = mu * l[:,col*self.l_i] + l[:,col*self.l_i + 1]
f2_constraints[:,2*col] = -1 * l[:,col*self.l_i + 1] + mu * l[:,col*self.l_i]
# abs(fz) <= mu * fx
f1_constraints[:,2*col+1] = mu * l[:,col*self.l_i] + l[:,col*self.l_i + 2]
f2_constraints[:,2*col+1] = -1 * l[:,col*self.l_i + 2] + mu * l[:,col*self.l_i]
f_constraints = vertcat(f1_constraints, f2_constraints)
#print(l)
#print("friction cones: {}".format(f_constraints))
#quit()
return f_constraints
"""
Constrain state at end of trajectory to be at x_goal
With a slack variable
First, just add tolerance
"""
def x_goal_constraint(self, s_flat,a, x_goal):
x, dx = self.s_unpack(s_flat)
x_end = x[-1, :]
con_list = []
for i in range(self.x_dim):
f = a[i] - (x_goal[0, i] - x_end[0, i]) ** 2
con_list.append(f)
return horzcat(*con_list)
################################################################################
# End of constraint functions
################################################################################
"""
Get pnorm of cp_param tuple
"""
def get_pnorm(self, cp_param):
# Compute pnorm of cp
pnorm = 0
for param in cp_param:
pnorm += fabs(param) ** self.p
pnorm = pnorm ** (1/self.p)
return pnorm
"""
Calculate mass matrix of hand given joint positions q
"""
# TODO
#def get_M_hand(self, q):
"""
Get grasp matrix
Input:
x: object pose [px, py, pz, qw, qx, qy, qz]
"""
def get_grasp_matrix(self, x):
# Transformation matrix from object frame to world frame
quat_o_2_w = [x[0,3], x[0,4], x[0,5], x[0,6]]
G_list = []
# Calculate G_i (grasp matrix for each finger)
for c in self.cp_list:
cp_pos_of = c["position"] # Position of contact point in object frame
quat_cp_2_o = c["orientation"] # Orientation of contact point frame w.r.t. object frame
S = np.array([
[0, -cp_pos_of[2], cp_pos_of[1]],
[cp_pos_of[2], 0, -cp_pos_of[0]],
[-cp_pos_of[1], cp_pos_of[0], 0]
])
P_i = np.eye(6)
P_i[3:6,0:3] = S
# Orientation of cp frame w.r.t. world frame
# quat_cp_2_w = quat_o_2_w * quat_cp_2_o
quat_cp_2_w = utils.multiply_quaternions(quat_o_2_w, quat_cp_2_o)
# R_i is rotation matrix from contact frame i to world frame
R_i = utils.get_matrix_from_quaternion(quat_cp_2_w)
R_i_bar = SX.zeros((6,6))
R_i_bar[0:3,0:3] = R_i
R_i_bar[3:6,3:6] = R_i
G_iT = R_i_bar.T @ P_i.T
G_list.append(G_iT)
#GT_full = np.concatenate(G_list)
GT_full = vertcat(*G_list)
GT = self.H @ GT_full
#print(GT.T)
return GT.T
"""
Get 6x6 object inertia matrix
"""
def get_M_obj(self):
M = np.zeros((6, 6))
M[0,0] = M[1,1] = M[2,2] = self.obj_mass
M[3,3] = self.obj_mass * (self.obj_shape[0]**2 + self.obj_shape[2]**2) / 12
M[4,4] = self.obj_mass * (self.obj_shape[1]**2 + self.obj_shape[2]**2) / 12
M[5,5] = self.obj_mass * (self.obj_shape[0]**2 + self.obj_shape[1]**2) / 12
return M
"""
Compute external gravity force on object, in -z direction
"""
def get_gapp(self):
gapp = np.array([[0],[0], [self.gravity * self.obj_mass], [0], [0], [0]])
return gapp
"""
Get the 3x3 rotation matrix from contact point frame to object frame
Input:
cp: dict with "position" and "orientation" fields in object frame
"""
def get_R_cp_2_o(self, cp):
#H = SX.zeros((4,4))
quat = cp["orientation"]
p = cp["position"]
R = utils.get_matrix_from_quaternion(quat)
return R
#H[3,3] = 1
#H[0:3,0:3] = R
##H[0:3,3] = p[:]
## Test transformation
##print("calculated: {}".format(H @ np.array([0,0,0,1])))
##print("actual: {}".format(p))
#return H
def get_R_o_2_w(self, x):
quat = [x[0,3], x[0,4], x[0,5], x[0,6]]
R = utils.get_matrix_from_quaternion(quat)
return R
"""
Get 4x4 transformation matrix from object frame to world frame
Input:
x: object pose [px, py, pz, qw, qx, qy, qz]
"""
def get_H_o_2_w(self, x):
H = SX.zeros((4,4))
quat = [x[0,3], x[0,4], x[0,5], x[0,6]]
R = utils.get_matrix_from_quaternion(quat)
p = np.array([x[0,0], x[0,1], x[0,2]])
H[3,3] = 1
H[0:3,0:3] = R
H[0:3,3] = p[:]
# Test transformation
#print("calculated: {}".format(H @ np.array([0,0,0,1])))
#print("actual: {}".format(p))
return H
"""
Get 4x4 transformation matrix from world to object frame
"""
def get_H_w_2_o(self, x):
H = np.zeros((4,4))
quat = [x[0,3], x[0,4], x[0,5], x[0,6]]
p = np.array([x[0,0], x[0,1], x[0,2]])
p_inv, quat_inv = utils.invert_transform(p, quat)
R = utils.get_matrix_from_quaternion(quat_inv)
H[3,3] = 1
H[0:3,0:3] = R
H[0:3,3] = p_inv[:]
# Test transformation
#print("calculated: {}".format(H @ np.array([0,0,1,1])))
return H
"""
Get list of contact point dicts given cp_params list
Each contact point is: {"position_of", "orientation_of"}
"""
def get_contact_points_from_cp_params(self, cp_params):
cp_list = []
for param in cp_params:
pos_of, quat_of = self.cp_param_to_cp_of(param)
cp = {"position": pos_of, "orientation": quat_of}
cp_list.append(cp)
return cp_list
"""
Get contact point position and orientation in object frame (OF)
Input:
(x_param, y_param, z_param) tuple
"""
def cp_param_to_cp_of(self, cp_param):
pnorm = self.get_pnorm(cp_param)
#print("cp_param: {}".format(cp_param))
#print("pnorm: {}".format(pnorm))
cp_of = []
# Get cp position in OF
for i in range(3):
cp_of.append(-self.obj_shape[i]/2 + (cp_param[i]+1)*self.obj_shape[i]/2)
cp_of = np.asarray(cp_of)
# TODO: Find analytical way of computing theta
# Compute derivatives dx, dy, dz of pnorm
#d_pnorm_list = []
#for param in cp_param:
# d = (param * (fabs(param) ** (self.p - 2))) / (pnorm**(self.p-1))
# d_pnorm_list.append(d)
#print("d_pnorm: {}".format(d_pnorm_list))
#dx = d_pnorm_list[0]
#dy = d_pnorm_list[1]
#dz = d_pnorm_list[2]
#w = np.sin(np.arctan2(dz*dz+dy*dy, dx)/2)
#x = 0
## This if case is to deal with the -0.0 behavior in arctan2
#if dx == 0: # TODO: this is going to be an error for through contact opt, when dx is an SX var
# y = np.sin(np.arctan2(dz, dx)/2)
#else:
# y = np.sin(np.arctan2(dz, -dx)/2)
#if dx == 0: # TODO: this is going to be an error for through contact opt, when dx is an SX var
# z = np.sin(np.arctan2(-dy, dx)/2)
#else:
# z = np.sin(np.arctan2(-dy, dx)/2)
#quat = (w,x,y,z)
x_param = cp_param[0]
y_param = cp_param[1]
z_param = cp_param[2]
# For now, just hard code quat
if y_param == -1:
quat = (0, 0, np.sqrt(2)/2, np.sqrt(2)/2)
elif y_param == 1:
quat = (0, 0, -np.sqrt(2)/2, np.sqrt(2)/2)
elif x_param == 1:
quat = (0, 1, 0, 0)
elif z_param == 1:
quat = (0, np.sqrt(2)/2, 0, np.sqrt(2)/2)
elif x_param == -1:
quat = (0, 0, 0, 1)
elif z_param == -1:
quat = (0, -np.sqrt(2)/2, 0, np.sqrt(2)/2)
return cp_of, quat
def test_cp_param_to_cp_of(self):
print("\nP1")
p1 = (0, -1, 0)
q = self.cp_param_to_cp_of(p1)
print("quat: {}".format(q))
print("\nP2")
p2 = (0, 1, 0)
q = self.cp_param_to_cp_of(p2)
print("quat: {}".format(q))
print("\nP3")
p3 = (1, 0, 0)
q = self.cp_param_to_cp_of(p3)
print("quat: {}".format(q))
print("\nP4")
p4 = (0, 0, 1)
q = self.cp_param_to_cp_of(p4)
print("quat: {}".format(q))
print("\nP5")
p5 = (-1, 0, 0)
q = self.cp_param_to_cp_of(p5)
print("quat: {}".format(q))
print("\nP6")
p6 = (0, 0, -1)
q = self.cp_param_to_cp_of(p6)
print("quat: {}".format(q))
################################################################################
# Path constraints
################################################################################
"""
Define upper and lower bounds for decision variables
Constrain initial x, q
Constrain l if specified
Constrain initial and final object velocity, if specified
"""
def path_constraints(self,
z,
x0,
x_goal = None,
l0 = None,
dx0 = None,
dx_end = None,
):
if self.log_file is not None:
with open(self.log_file, "a+") as f:
f.write("\nPath constraints: {}\n")
t,s_flat,l_flat,a = self.decvar_unpack(z)
nGrid = self.nGrid
# Time bounds
t_range = [0,self.tf] # initial and final time
t_lb = np.linspace(t_range[0],t_range[1],nGrid) # lower bound
t_ub = t_lb # upper bound
#print("Timestamps: {}".format(t_lb))
# State path constraints
# Unpack state vector
x,dx = self.s_unpack(s_flat) # Object pose constraints
x_range = np.array([
[-0.15, 0.15], # x coord range
[-0.15,0.15], # y coord range
[0.01,0.15], # z coord range TODO Hardcoded
[-np.inf, np.inf], # qx range
[-np.inf, np.inf], # qy range
[-np.inf, np.inf], # qz range
[-np.inf, np.inf], # qw range
])
x_lb = np.ones(x.shape) * x_range[:,0]
x_ub = np.ones(x.shape) * x_range[:,1]
# Object pose boundary contraint (starting position of object)
if self.log_file is not None:
with open(self.log_file, "a+") as f:
f.write("Constrain x0 to {}\n".format(x0))
x_lb[0] = x0
x_ub[0] = x0
#if x_goal is not None:
# x_lb[-1] = x_goal
# x_ub[-1] = x_goal
# # Just z goal
# #x_lb[-1,1] = x_goal[0,1]
# #x_ub[-1,1] = x_goal[0,1]
# Object velocity constraints
dx_range = np.array([
[-0.05,0.05], # x vel range
[-0.05,0.05], # y vel range
[-0.05,0.05], # z vel range
[-np.pi/2, np.pi/2], # angular velocity range
[-np.pi/2, np.pi/2], # angular velocity range
[-np.pi/2, np.pi/2], # angular velocity range
])
dx_lb = np.ones(dx.shape) * dx_range[:,0]
dx_ub = np.ones(dx.shape) * dx_range[:,1]
if dx0 is not None:
if self.log_file is not None:
with open(self.log_file, "a+") as f:
f.write("Constrain dx0 to {}\n".format(dx0))
dx_lb[0] = dx0
dx_ub[0] = dx0
if dx_end is not None:
if self.log_file is not None:
with open(self.log_file, "a+") as f:
f.write("Constrain dx_end to {}\n".format(dx_end))
dx_lb[-1] = dx_end
dx_ub[-1] = dx_end
# Contact force contraints
# For now, just define min and max forces
l = self.l_unpack(l_flat)
l_epsilon = 0
# Limits for one finger
f1_l_range = np.array([
[0, np.inf], # c1 fn force range
[-np.inf, np.inf], # c1 ft force range
[-np.inf, np.inf], # c1 ft force range
#[-np.inf, np.inf], # c1 ft force range
])
l_range = np.tile(f1_l_range, (self.fnum, 1))
l_lb = np.ones(l.shape) * l_range[:,0]
l_ub = np.ones(l.shape) * l_range[:,1]
# Initial contact force constraints
if l0 is not None:
if self.log_file is not None:
with open(self.log_file, "a+") as f:
f.write("Constrain l0 to {}\n".format(l0))
l_lb[0] = l0
l_ub[0] = l0
# Pack state contraints
s_lb = self.s_pack(x_lb,dx_lb)
s_ub = self.s_pack(x_ub,dx_ub)
a_lb = np.zeros(a.shape)
a_ub = np.ones(a.shape) * np.inf
# Pack the constraints for all dec vars
z_lb = self.decvar_pack(t_lb,s_lb,self.l_pack(l_lb),a_lb)
z_ub = self.decvar_pack(t_ub,s_ub,self.l_pack(l_ub),a_ub)
return z_lb, z_ub
"""
Set initial trajectory guess
For now, just define everything to be 0
"""
def get_initial_guess(self, z_var, x0, x_goal):
t_var, s_var, l_var, a_var = self.decvar_unpack(z_var)
# Define time points to be equally spaced
t_traj = np.linspace(0,self.tf,self.nGrid)
x_var, dx_var = self.s_unpack(s_var)
dx_traj = np.zeros(dx_var.shape)
x_traj = np.squeeze(np.linspace(x0, x_goal, self.nGrid))
s_traj = self.s_pack(x_traj, dx_traj)
l_traj = np.ones(self.l_unpack(l_var).shape)
#l_traj[:,2] = 0.1
#l_traj[:,8] = 0.1
#l_traj[:,0] = 1
#l_traj[:,6] = 1
a_traj = np.zeros(a_var.shape)
z_traj = self.decvar_pack(t_traj, s_traj, self.l_pack(l_traj),a_traj)
return z_traj
def main():
system = FixedContactPointSystem()
system.test_cp_param_to_cp_of()
if __name__ == "__main__":
main()
```
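Most of the bookkeeping above is the pack/unpack layout of the decision vector z = [t, s, l, a]. A small sketch of how that round-trip might be exercised (assuming casadi is installed and the package imports as rrc_iprl_package, as the module's own imports suggest; the cube shape, mass and contact-point parameters are placeholders):
```python
# Hypothetical round-trip through the decision-variable packing helpers.
from rrc_iprl_package.traj_opt.fixed_contact_point_system import FixedContactPointSystem

system = FixedContactPointSystem(
    nGrid=10, dt=0.1, fnum=3,
    cp_params=[(0, -1, 0), (0, 1, 0), (1, 0, 0)],  # one cube face per finger (placeholder)
    obj_shape=(0.065, 0.065, 0.065),               # width, length, height (placeholder)
    obj_mass=0.016,                                # kg (placeholder)
)

t, s_flat, l_flat, a = system.dec_vars()
z = system.decvar_pack(t, s_flat, l_flat, a)
t2, s2, l2, a2 = system.decvar_unpack(z)

x, dx = system.s_unpack(s2)   # poses (nGrid x 7) and twists (nGrid x 6)
l = system.l_unpack(l2)       # contact forces (nGrid x fnum*3)
print(x.shape, dx.shape, l.shape)
```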
#### File: rrc_package/scripts/hierarchical_lift_task.py
```python
import json
import sys
import os
import os.path as osp
import numpy as np
from rrc_iprl_package.envs import cube_env, custom_env
from trifinger_simulation.tasks import move_cube
from rrc_iprl_package.control.controller_utils import PolicyMode
from rrc_iprl_package.control.control_policy import HierarchicalControllerPolicy
FRAMESKIP = 1
#MAX_STEPS = 20 * 1000 // FRAMESKIP
MAX_STEPS = None # For running on real robot
class RandomPolicy:
"""Dummy policy which uses random actions."""
def __init__(self, action_space):
self.action_space = action_space
def predict(self, observation):
return self.action_space.sample()
def main():
# the difficulty level and the goal pose (as JSON string) are passed as
# arguments
difficulty = int(sys.argv[1])
goal_pose_json = sys.argv[2]
if os.path.exists(goal_pose_json):
with open(goal_pose_json) as f:
goal = json.load(f)['goal']
else:
goal = json.loads(goal_pose_json)
initial_pose = move_cube.sample_goal(-1)
initial_pose.position = np.array([0.0,0.0,move_cube._CUBOID_SIZE[2]/2])
theta = 0
initial_pose.orientation = np.array([0, 0, np.sin(theta/2), np.cos(theta/2)])
if osp.exists('/output'):
save_path = '/output/action_log.npz'
else:
save_path = 'action_log.npz'
env = cube_env.RealRobotCubeEnv(
goal, initial_pose.to_dict(), difficulty,
cube_env.ActionType.TORQUE_AND_POSITION, frameskip=FRAMESKIP,
num_steps=MAX_STEPS, visualization=True, save_npz=save_path
)
rl_load_dir, start_mode = '', PolicyMode.TRAJ_OPT
goal_pose = move_cube.Pose.from_dict(goal)
policy = HierarchicalControllerPolicy(action_space=env.action_space,
initial_pose=initial_pose, goal_pose=goal_pose,
load_dir=rl_load_dir, difficulty=difficulty,
start_mode=start_mode)
env = custom_env.HierarchicalPolicyWrapper(env, policy)
observation = env.reset()
accumulated_reward = 0
is_done = False
old_mode = policy.mode
steps_so_far = 0
try:
while not is_done:
if MAX_STEPS is not None and steps_so_far == MAX_STEPS: break
action = policy.predict(observation)
observation, reward, is_done, info = env.step(action)
if old_mode != policy.mode:
#print('mode changed: {} to {}'.format(old_mode, policy.mode))
old_mode = policy.mode
#print("reward:", reward)
accumulated_reward += reward
steps_so_far += 1
except Exception as e:
print("Error encounted: {}. Saving logs and exiting".format(e))
env.save_action_log()
policy.impedance_controller.save_log()
raise e
env.save_action_log()
# Save control_policy_log
policy.impedance_controller.save_log()
#print("------")
#print("Accumulated Reward: {:.3f}".format(accumulated_reward))
if __name__ == "__main__":
main()
```
|
{
"source": "jeffreyzhang/devtool",
"score": 2
}
|
#### File: devtool/mysite/view.py
```python
from django.shortcuts import render
# from django.views.decorators.clickjacking import xframe_options_deny
# from django.views.decorators.clickjacking import xframe_options_sameorigin
# from django.views.decorators.clickjacking import xframe_options_exempt
def home(request):
context = {}
context['hello'] = 'Hello World!'
return render(request, 'home.html', context)
def crypto(request):
return render(request, 'crypto.html', {})
```
|
{
"source": "jeffrey-zhang/learn",
"score": 3
}
|
#### File: core_python/basic/threading_join.py
```python
import threading
import time
products = []
condition = threading.Condition()
class consumer(threading.Thread):
def consume(self):
global condition
global products
condition.acquire()
if len(products) == 0:
condition.wait()
print('consumer is notified: no product to consume')
products.pop()
print("consumer notification: consume 1 product")
print('consumer notification: there are ' + str(len(products)) + " left that can be consumed")
condition.notify()
condition.release()
def run(self):
for i in range(0,20):
time.sleep(4)
self.consume()
class Producer(threading.Thread):
def produce(self):
global condition
global products
condition.acquire()
if len(products) == 10:
condition.wait()
print('consumer notified')
```
#### File: demos/hello/app.py
```python
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return 'Hello Flask'
@app.route('/greet')
@app.route('/greet/<name>')
def greet(name='default value'):
return '<h1>Hello, %s! Welcome</h1>' % name
```
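The two routes above can be exercised without starting a server by using Flask's built-in test client. A quick sketch (assuming this module is saved and importable as app):
```python
# Hypothetical smoke test for the routes above, using Flask's test client.
from app import app

with app.test_client() as client:
    print(client.get("/").data)             # b'Hello Flask'
    print(client.get("/greet").data)        # greeting with the default name
    print(client.get("/greet/Grey").data)   # b'<h1>Hello, Grey! Welcome</h1>'
```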
|
{
"source": "Jeffrey-zhao/flask-learning",
"score": 3
}
|
#### File: flask-learning/flaskr/auth.py
```python
import functools
from flask import (
Blueprint,flash,g,redirect,render_template,request,session,url_for
)
from werkzeug.security import check_password_hash,generate_password_hash
from flaskr.db import get_db
bp=Blueprint('auth',__name__,url_prefix='/auth')
@bp.route('/register',methods=('GET','POST'))
def register():
if request.method =='POST':
username=request.form['username']
password=request.form['password']
db=get_db()
error=None
if not username:
error='Username is required.'
elif not password:
error='Password is required.'
elif db.execute(
'select id from user where username=?',(username,)
).fetchone() is not None:
error='User {} is already registered'.format(username)
if error is None:
db.execute('insert into user (username,password) values(?,?)',
(username,generate_password_hash(password))
)
db.commit()
return redirect(url_for('auth.login'))
flash(error)
return render_template('auth/register.html')
@bp.route('/login',methods=('GET','POST'))
def login():
if request.method=='POST':
username=request.form['username']
password=request.form['password']
db=get_db()
error=None
user=db.execute(
'select * from user where username=?',(username,)
).fetchone()
if user is None:
error='Incorrect username.'
elif not check_password_hash(user['password'],password):
error='Incorrect password.'
if error is None:
session.clear()
session['user_id']=user['id']
return redirect(url_for('index'))
flash(error)
return render_template('auth/login.html')
@bp.before_app_request
def load_logged_in_user():
user_id=session.get('user_id')
if user_id is None:
g.user=None
else:
g.user=get_db().execute(
'select * from user where id=?',(user_id,)
).fetchone()
@bp.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login'))
return view(**kwargs)
return wrapped_view
```
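The login_required decorator defined at the end is meant to wrap views in other blueprints. A hedged illustration of how a protected view could use it (the blog blueprint and template name here are illustrative and not part of the file above):
```python
# Hypothetical protected view using the login_required decorator above.
from flask import Blueprint, g, render_template
from flaskr.auth import login_required

bp = Blueprint('blog', __name__)

@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
    # g.user is populated by load_logged_in_user before every request
    return render_template('blog/create.html', user=g.user)
```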
|
{
"source": "jeffreyzli/pokerbot-2017",
"score": 3
}
|
#### File: pokerbot-2017/pythonbot_1.0/GameData.py
```python
import HandRankings as Hand
from deuces.deuces import Card, Evaluator
class GameData:
def __init__(self, name, opponent_name, stack_size, bb):
# match stats
self.name = name
self.opponent_name = opponent_name
self.starting_stack_size = int(stack_size)
self.num_hands = 0
self.num_wins = 0
self.num_flop = 0
self.big_blind = int(bb)
# self pre-flop stats
self.pfr = 0
self.vpip = 0
self.three_bet = 0
self.fold_big_bet = 0
# opponent pre-flop stats
self.opponent_pfr = 0
self.opponent_vpip = 0
self.opponent_three_bet = 0
self.opponent_fold_pfr = 0
self.opponent_fold_three_bet = 0
# self post-flop stats
self.aggression_factor = False
self.showdown = 0
self.c_bet = 0
self.showdown_win = 0
self.double_barrel = 0
self.discarded_card = None
# opponent post-flop stats
self.opponent_c_bet = 0
self.opponent_fold_c_bet = 0
self.opponent_double_barrel = 0
# current hand stats
self.button = True
self.current_pot_size = 0
self.current_hand = []
self.current_hand_strength = 0.0
self.hand_class = ''
self.hand_score = 0
self.current_game_state = ''
self.board_cards = []
self.last_actions = []
self.current_legal_actions = []
self.has_called = False
self.opponent_has_called = False
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
self.discard = False
self.has_five_bet = False
self.has_bet_aggressively = False
self.time_bank = 0.0
self.opc = 0
def new_hand(self, data_list):
self.num_hands += 1
self.button = data_list[2]
if "true" in self.button:
self.button = True
else:
self.button = False
self.current_hand = [data_list[3], data_list[4]]
self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
self.current_game_state = 'PREFLOP'
self.board_cards = []
self.last_actions = []
self.current_legal_actions = []
self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.aggression_factor = False
self.discarded_card = None
def get_action(self, data_list):
self.current_pot_size = int(data_list[1])
self.opc = self.starting_stack_size - self.current_pot_size
self.time_bank = float(data_list[-1])
num_board_cards = int(data_list[2])
self.street_dict[str(num_board_cards)] += 1
if self.current_game_state == 'PREFLOP':
if self.street_dict['3'] > 0 and self.street_dict['4'] == 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'FLOPTURN'
self.num_flop += 1
elif self.current_game_state == 'FLOPTURN':
if self.street_dict['4'] > 0 and self.street_dict['5'] == 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'TURNRIVER'
elif self.current_game_state == 'TURNRIVER':
if self.street_dict['5'] > 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'POSTRIVER'
for i in range(num_board_cards):
board_card = data_list[3 + i]
if board_card not in self.board_cards:
self.board_cards.append(data_list[3 + i])
if num_board_cards > 0:
board_cards = []
for board_card in self.board_cards:
board_cards.append(Card.new(board_card))
hand = []
for card in self.current_hand:
hand.append(Card.new(card))
self.hand_score = Evaluator().evaluate(hand, board_cards)
self.hand_class = Evaluator().class_to_string(Evaluator().get_rank_class(self.hand_score))
index = 3 + num_board_cards
num_last_actions = int(data_list[index])
index += 1
current_last_actions = []
for i in range(num_last_actions):
current_last_actions.append(data_list[index + i])
self.last_actions.append(current_last_actions)
if self.discard:
for action in current_last_actions:
if 'DISCARD' in action and self.name in action:
old_card = action[8:10]
new_card = action[11:13]
self.current_hand[self.current_hand.index(old_card)] = new_card
self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
self.discard = False
break
if self.current_game_state == 'PREFLOP':
if self.current_pot_size == 4:
if self.button:
self.vpip += 1
self.has_called = True
else:
self.opponent_vpip += 1
self.opponent_has_called = True
else:
for action in current_last_actions:
if 'RAISE' in action:
round_num = self.street_dict['0']
if round_num == 1:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_two_bet = True
elif round_num == 2:
if self.button:
if self.name in action:
self.pfr += 1
self.vpip += 1
self.has_two_bet = True
else:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_three_bet = True
else:
if self.name in action:
self.pfr += 1
self.vpip += 1
self.has_three_bet = True
else:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_four_bet = True
elif round_num == 3:
if self.name in action:
self.pfr += 1
self.vpip += 1
elif 'CALL' in action:
if self.name in action:
self.vpip += 1
else:
self.opponent_vpip += 1
elif self.current_game_state == 'FLOPTURN':
round_num = self.street_dict['3']
if round_num == 1:
self.discard = True
elif round_num == 2:
for action in current_last_actions:
if 'BET' in action:
self.opponent_c_bet += 1
break
elif round_num == 3:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
elif 'RAISE' in action:
if self.name in action:
self.has_two_bet = True
else:
if self.button:
self.opponent_has_three_bet = True
else:
self.opponent_has_two_bet = True
elif round_num == 4:
for action in current_last_actions:
if 'RAISE' in action:
if self.name in action:
if self.button:
self.has_four_bet = True
else:
self.has_three_bet = True
break
elif self.current_game_state == 'TURNRIVER':
round_num = self.street_dict['4']
if round_num == 1:
self.discard = True
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
break
elif round_num == 2:
for action in current_last_actions:
if 'BET' in action:
self.opponent_c_bet += 1
break
elif round_num == 3:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
elif 'RAISE' in action:
if self.name in action:
self.has_two_bet = True
else:
if self.button:
self.opponent_has_three_bet = True
else:
self.opponent_has_two_bet = True
elif round_num == 4:
for action in current_last_actions:
if 'RAISE' in action:
if self.name in action:
if self.button:
self.has_four_bet = True
else:
self.has_three_bet = True
break
elif self.current_game_state == 'POSTRIVER':
round_num = self.street_dict['5']
if round_num == 1:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.double_barrel += 1
else:
self.opponent_double_barrel += 1
break
index += num_last_actions
num_legal_actions = int(data_list[index])
index += 1
self.current_legal_actions = []
for i in range(num_legal_actions):
self.current_legal_actions.append(data_list[index + i])
def legal_action(self, action):
for legal_action in self.current_legal_actions:
if action in legal_action:
if action == 'BET' or action == 'RAISE':
index = legal_action.index(':') + 1
sub = legal_action[index:]
index = sub.index(':')
return [int(sub[:index]), int(sub[index+1:])]
if action == 'CALL':
for last_action in self.last_actions[-1]:
if 'RAISE' in last_action and self.opponent_name in last_action:
sub = last_action[last_action.index(':')+1:]
return int(sub[:sub.index(':')])
return True
return None
def hand_over(self, data_list):
num_board_cards = int(data_list[3])
index = 4+num_board_cards
num_last_actions = int(data_list[index])
current_last_actions = []
for i in range(num_last_actions):
current_last_actions.append(data_list[index+i])
if self.current_game_state == 'PREFLOP':
for action in current_last_actions:
if 'FOLD' in action and self.opponent_name in action:
if self.button:
for last_action in self.last_actions[-1]:
if 'RAISE' in last_action and self.name in last_action:
self.opponent_fold_pfr += 1
if self.has_three_bet and not self.has_four_bet:
self.opponent_fold_three_bet += 1
self.num_wins += 1
else:
for last_action in current_last_actions:
if 'RAISE' in last_action and self.name in last_action:
self.opponent_fold_pfr += 1
if self.has_three_bet and not self.has_four_bet:
self.opponent_fold_three_bet += 1
self.num_wins += 1
elif self.current_game_state == 'FLOPTURN':
for action in current_last_actions:
if self.button:
if 'FOLD' in action and self.opponent_name in action:
for last_action in self.last_actions[-1]:
if 'BET' in last_action and self.name in last_action:
self.opponent_fold_c_bet += 1
self.num_wins += 1
else:
if 'FOLD' in action and self.opponent_name in action:
for last_action in current_last_actions:
if 'BET' in last_action and self.name in last_action:
self.opponent_fold_c_bet += 1
self.num_wins += 1
elif self.current_game_state == 'POSTRIVER':
for action in current_last_actions:
if 'WIN' in action:
if self.name in action:
self.num_wins += 1
for last_action in current_last_actions:
if 'SHOW' in last_action:
self.showdown += 1
self.showdown_win += 1
break
break
```
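legal_action and get_action slice space-delimited engine messages in which bet/raise actions carry a min and max after colons. A hedged sketch of that parsing in isolation (the strings below are invented to match the indexing the code performs, not taken from a real game log):
```python
# Hypothetical legal-action strings, sliced the same way legal_action() does.
legal_actions = ["FOLD", "CALL", "RAISE:20:400"]

for legal in legal_actions:
    if "RAISE" in legal:
        sub = legal[legal.index(":") + 1:]          # "20:400"
        min_raise = int(sub[:sub.index(":")])       # 20
        max_raise = int(sub[sub.index(":") + 1:])   # 400
        print("raise between", min_raise, "and", max_raise)
```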
|
{
"source": "jeffreyzpan/directory-scraper",
"score": 4
}
|
#### File: jeffreyzpan/directory-scraper/student_directory.py
```python
import json
class StudentDirectory:
def __init__(self, student_dict):
self.student_dict = student_dict
self.dorm_list = []
self.location_list = []
for student in self.student_dict.values():
if student['living_location'] not in self.dorm_list:
self.dorm_list.append(student['living_location'])
if student['location'] not in self.location_list:
self.location_list.append(student['location'])
self.class_list = ['junior', 'lower', 'upper', 'senior']
def search_by_name(self, first_name='', last_name=''):
student_list = []
for student in self.student_dict:
if last_name in student.split(',')[0] and first_name in student.split(',')[1]:
student_list.append(student)
return student_list
def list_by_class(self, class_name):
assert class_name.lower() in self.class_list, "Error: Class name must be Junior, Lower, Upper, or Senior"
student_list = []
for student in self.student_dict:
if self.student_dict[student]['class'].lower() == class_name.lower():
student_list.append(student)
return student_list
def list_by_dorm(self, dorm_name):
student_list = []
for student in self.student_dict:
if dorm_name.lower() in self.student_dict[student]['living_location'].lower():
student_list.append(student)
return student_list
def list_by_hometown(self, hometown):
student_list = []
for student in self.student_dict:
if hometown.lower() in self.student_dict[student]['location'].lower():
student_list.append(student)
return student_list
def list_by_enter_year(self, enter_year):
student_list = []
for student in self.student_dict:
if int(self.student_dict[student]['enter_year']) == enter_year:
student_list.append(student)
return student_list
```
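The directory expects a dict keyed by "Last, First" names whose values carry class, living_location, location and enter_year fields. A small hedged usage example (the student data below is made up):
```python
# Hypothetical usage of StudentDirectory with a tiny made-up dataset.
from student_directory import StudentDirectory

students = {
    "Doe, Jane": {
        "class": "Senior",
        "living_location": "Example Hall",
        "location": "Boston, MA",
        "enter_year": "2015",
    },
}

directory = StudentDirectory(students)
print(directory.search_by_name(last_name="Doe"))  # ['Doe, Jane']
print(directory.list_by_class("Senior"))          # ['Doe, Jane']
print(directory.list_by_enter_year(2015))         # ['Doe, Jane']
```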
|
{
"source": "jeffreyzpan/micronet-submission",
"score": 3
}
|
#### File: micronet-submission/data_providers/augment.py
```python
import numpy as np
import torch
class Cutout(object):
"""Randomly mask out one or more patches from an image.
please refer to https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
if isinstance(img, np.ndarray):
h = img.shape[1]
w = img.shape[2]
else:
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
# center point of the cutout region
y = np.random.randint(h)
x = np.random.randint(w)
width = int(self.length / 2)
y1 = np.clip(y - width, 0, h)
y2 = np.clip(y + width, 0, h)
x1 = np.clip(x - width, 0, w)
x2 = np.clip(x + width, 0, w)
mask[y1: y2, x1: x2] = 0.0
if isinstance(img, np.ndarray):
mask = np.expand_dims(mask, axis=0)
else:
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
return img * mask
class PostNormRandomHorizontalFlip(object):
""" Random horizontal flip after normalization """
def __init__(self, flip_prob=0.5):
self.flip_prob = flip_prob
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image after random horizontal flip.
"""
if np.random.random_sample() < self.flip_prob:
np_img = img.numpy() # C, H, W
np_img = np_img[:, :, ::-1].copy()
img = torch.from_numpy(np_img).float()
return img
class PostNormRandomCrop(object):
""" Random crop after normalization """
def __init__(self, pad=4):
self.pad = pad
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image after random horizontal flip.
"""
np_img = img.numpy() # C, H, W
init_shape = np_img.shape
new_shape = [init_shape[0],
init_shape[1] + self.pad * 2,
init_shape[2] + self.pad * 2]
zeros_padded = np.zeros(new_shape)
zeros_padded[:, self.pad:init_shape[1] + self.pad, self.pad:init_shape[2] + self.pad] = np_img
# randomly crop to original size
init_x = np.random.randint(0, self.pad * 2)
init_y = np.random.randint(0, self.pad * 2)
cropped = zeros_padded[:,
init_x: init_x + init_shape[1],
init_y: init_y + init_shape[2]]
img = torch.from_numpy(cropped).float()
return img
```
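Cutout and the post-normalization crop/flip above are written to be chained after ToTensor/Normalize in a transform pipeline. A hedged sketch of such a pipeline (torchvision is assumed to be available, and the normalization statistics are the usual CIFAR-10 values used only as placeholders):
```python
# Hypothetical CIFAR-style augmentation pipeline using the transforms above.
import torchvision.transforms as transforms
from data_providers.augment import Cutout, PostNormRandomCrop, PostNormRandomHorizontalFlip

train_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                         std=[0.2470, 0.2435, 0.2616]),
    PostNormRandomCrop(pad=4),
    PostNormRandomHorizontalFlip(flip_prob=0.5),
    Cutout(n_holes=1, length=16),
])
```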
#### File: jeffreyzpan/micronet-submission/_init_paths.py
```python
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
base_dir = osp.join(osp.dirname(__file__), '..')
# Add lib to PYTHONPATH
add_path(base_dir)
```
|
{
"source": "JeffreyZZ/knboard",
"score": 2
}
|
#### File: backend/accounts/serializers.py
```python
from datetime import datetime
from pathlib import Path
from dj_rest_auth.models import TokenModel
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from dj_rest_auth.registration.serializers import RegisterSerializer
from .models import Avatar
try:
from allauth.account import app_settings as allauth_settings
from allauth.utils import (email_address_exists,
get_username_max_length)
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.providers.base import AuthProcess
except ImportError:
raise ImportError("allauth needs to be added to INSTALLED_APPS.")
User = get_user_model()
class AvatarSerializer(serializers.ModelSerializer):
name = serializers.SerializerMethodField()
class Meta:
model = Avatar
fields = ["id", "photo", "name"]
def get_name(self, obj):
return Path(obj.photo.name).stem
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ["id", "username", "email"]
class UserSearchSerializer(serializers.ModelSerializer):
avatar = AvatarSerializer(read_only=True)
class Meta:
model = User
fields = ["id", "username", "avatar"]
class UserDetailSerializer(serializers.ModelSerializer):
avatar = AvatarSerializer(read_only=True)
email = serializers.EmailField(
validators=[UniqueValidator(queryset=User.objects.all())], required=False
)
def update(self, instance, validated_data):
instance.update_time = datetime.now().timestamp()
return super().update(instance, validated_data)
class Meta:
model = User
fields = [
"id",
"username",
"first_name",
"last_name",
"email",
"avatar",
"date_joined",
"is_guest",
]
read_only_fields = [
"id",
"avatar",
"date_joined",
]
class BoardOwnerSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ["id"]
class BoardMemberSerializer(serializers.ModelSerializer):
avatar = AvatarSerializer(read_only=True)
class Meta:
model = User
fields = ["id", "username", "email", "first_name", "last_name", "avatar"]
class TokenSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(source="user.id", read_only=True)
username = serializers.CharField(source="user.username", read_only=True)
photo_url = serializers.SerializerMethodField()
class Meta:
model = TokenModel
# Include field "key" once frontend actually uses token auth
# instead of the current session auth
fields = ("id", "username", "photo_url")
def get_photo_url(self, obj):
if not obj.user.avatar:
return None
return obj.user.avatar.photo.url
class CustomRegisterSerializer(RegisterSerializer):
"""
Custom RegisterSerializer to allow save extra information for user to be compatible with mdclub user
"""
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
# mdclub user-specific properties
user.create_time = user.date_joined.timestamp()
# save
adapter.save_user(request, user, self)
self.custom_signup(request, user)
setup_user_email(request, user, [])
return user
```
|
{
"source": "jeffridge/ownphotos",
"score": 2
}
|
#### File: ownphotos/api/social_graph.py
```python
import networkx as nx
from api.models import Person, Photo
import itertools
from django.db.models import Count
def build_social_graph():
G = nx.Graph()
me = Person.objects.all().annotate(face_count=Count('faces')).order_by('-face_count').first()
people = list(Person.objects.all().annotate(face_count=Count('faces')).order_by('-face_count'))[1:]
for person in people:
if person.id == me.id:
continue
person = Person.objects.prefetch_related('faces__photo__faces__person').filter(id=person.id)[0]
for this_person_face in person.faces.all():
for other_person_face in this_person_face.photo.faces.all():
if other_person_face.person.id != me.id:
G.add_edge(person.name,other_person_face.person.name)
# pos = nx.kamada_kawai_layout(G,scale=1000)
pos = nx.spring_layout(G,scale=1000,iterations=20)
nodes = [{'id':node,'x':pos[0],'y':pos[1]} for node,pos in pos.items()]
links = [{'source':pair[0], 'target':pair[1]} for pair in G.edges()]
res = {"nodes":nodes, "links":links}
return res
def build_ego_graph(person_id):
G = nx.Graph()
person = Person.objects.prefetch_related('faces__photo__faces__person').filter(id=person_id)[0]
for this_person_face in person.faces.all():
for other_person_face in this_person_face.photo.faces.all():
G.add_edge(person.name,other_person_face.person.name)
nodes = [{'id':node} for node in G.nodes()]
links = [{'source':pair[0], 'target':pair[1]} for pair in G.edges()]
res = {"nodes":nodes, "links":links}
return res
```
|
{
"source": "jeffrimko/Auxly",
"score": 4
}
|
#### File: lib/auxly/stringy.py
```python
from random import choice
from string import ascii_lowercase
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def subat(orig, index, replace):
"""Substitutes the replacement string/character at the given index in the
given string, returns the modified string.
**Examples**:
::
auxly.stringy.subat("bit", 2, "n")
"""
return "".join([(orig[x] if x != index else replace) for x in range(len(orig))])
def randomize(length=6, choices=None):
"""Returns a random string of the given length."""
if type(choices) == str:
choices = list(choices)
choices = choices or ascii_lowercase
return "".join(choice(choices) for _ in range(length))
def between(full, start, end):
"""Returns the substring of the given string that occurs between the start
and end strings."""
try:
if not start:
parse = full
else:
parse = full.split(start, 1)[1]
if end:
result = parse.split(end, 1)[0]
else:
result = parse
return result
except:
return full
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
pass
```
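A few quick examples of the helpers above (the outputs shown in comments follow directly from the implementations):
```python
# Example usage of the string helpers above.
from auxly.stringy import subat, randomize, between

print(subat("bit", 2, "n"))                     # "bin"
print(len(randomize(8)))                        # 8 (random lowercase letters)
print(between("<b>hello</b>", "<b>", "</b>"))   # "hello"
```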
#### File: Auxly/tests/filesys_test_2.py
```python
from testlib import *
import auxly
from auxly.filesys import copy, cwd, delete, isempty
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(BaseTest):
def test_copy_1(test):
"""Copy file to dir/file, overwrite existing file."""
path0 = FNAME[0]
path1 = op.join(DIR[0], FNAME[0])
fwrite(path0, TEXT[0])
fwrite(path1, TEXT[1])
test.assertTrue(TEXT[1] == fread(path1))
test.assertTrue(copy(path0, path1))
test.assertTrue(TEXT[0] == fread(path1))
def test_copy_2(test):
"""Copy file to dir/file fails due to overwrite flag."""
path0 = FNAME[0]
path1 = op.join(DIR[0], FNAME[0])
fwrite(path0, TEXT[0])
fwrite(path1, TEXT[1])
test.assertTrue(TEXT[1] == fread(path1))
test.assertFalse(copy(path0, path1, overwrite=False))
test.assertTrue(TEXT[1] == fread(path1))
def test_copy_3(test):
"""Copy file to dir."""
path0 = FNAME[0]
fwrite(path0, TEXT[0])
path1 = DIR[1]
test.assertTrue(copy(path0, path1))
path1 = op.join(path1, path0)
test.assertTrue(TEXT[0] == fread(path1))
def test_copy_4(test):
"""Copy file to dir fails due to overwrite flag."""
path0 = FNAME[0]
path1 = DIR[0]
fpath1 = op.join(path1, path0)
fwrite(path0, TEXT[0])
fwrite(fpath1, TEXT[1])
test.assertFalse(copy(path0, path1, overwrite=False))
test.assertTrue(TEXT[1] == fread(fpath1))
def test_copy_5(test):
"""Copy empty dir to dir."""
path0 = op.join(DIR[0])
path1 = op.join(DIR[1])
test.assertTrue(makedirs(path0))
test.assertTrue(makedirs(path1))
test.assertTrue(copy(path0, path1))
tpath = op.join(DIR[1], DIR[0])
test.assertTrue(op.isdir(tpath))
def test_copy_6(test):
"""Copy dir to dir using relative src path."""
test.assertTrue(makedirs(op.join(DIR[0], DIR[1])))
test.assertTrue(makedirs(op.join(DIR[2])))
test.assertFalse(op.isdir(DIR[1]))
cwd(DIR[0])
test.assertTrue(op.isdir(DIR[1]))
path0 = op.join("..", DIR[2])
path1 = DIR[1]
tpath = op.join(DIR[1], DIR[2])
test.assertFalse(op.isdir(tpath))
test.assertTrue(copy(path0, path1))
test.assertTrue(op.isdir(path0))
test.assertTrue(op.isdir(tpath))
def test_copy_7(test):
"""Copy dir to dir using relative dst path."""
test.assertTrue(makedirs(op.join(DIR[0], DIR[1])))
cwd(DIR[0])
test.assertTrue(op.isdir(DIR[1]))
path0 = DIR[1]
path1 = ".."
tpath = op.join("..", DIR[1])
test.assertFalse(op.isdir(tpath))
test.assertTrue(copy(path0, path1))
test.assertTrue(op.isdir(path0))
test.assertTrue(op.isdir(tpath))
def test_copy_8(test):
"""Copy dir to dir that does not exist."""
path0 = DIR[0]
path1 = DIR[1]
test.assertTrue(makedirs(path0))
test.assertFalse(op.isdir(path1))
test.assertTrue(copy(path0, path1))
test.assertTrue(op.isdir(path0))
test.assertTrue(op.isdir(path1))
test.assertTrue(isempty(path1))
def test_copy_9(test):
"""Copy file without extension."""
path0 = "../LICENSE"
path1 = "LICENSE"
test.assertTrue(op.isfile(path0))
test.assertFalse(op.isfile(path1))
test.assertTrue(copy(path0, path1))
test.assertTrue(op.isfile(path0))
test.assertTrue(op.isfile(path1))
test.assertTrue(delete(path1))
test.assertFalse(op.isfile(path1))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
```
#### File: Auxly/tests/filesys_test_5.py
```python
from testlib import *
from auxly.filesys import Path
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(BaseTest):
def test_path_1(test):
"""Basic Path usage."""
path = FNAME[0]
fwrite(path, TEXT[0])
base,ext = op.splitext(path)
dir_ = op.abspath(".")
p = Path(path)
test.assertTrue(p.exists())
test.assertTrue(p.isfile())
test.assertFalse(p.isdir())
test.assertEqual(dir_, p.dir)
test.assertEqual(path, p.filename)
test.assertEqual(base, p.file)
test.assertEqual(ext, p.ext)
test.assertEqual(FNAME[0], p.name)
def test_path_2(test):
"""Basic Path usage."""
p = Path(DIR[0], FNAME[0])
test.assertFalse(p.exists())
test.assertFalse(p.isfile())
test.assertFalse(p.isdir())
test.assertEqual(op.abspath(op.join(DIR[0], FNAME[0])), p)
test.assertEqual(FNAME[0], p.name)
def test_path_3(test):
"""Basic Path usage."""
p = Path("not_a_real_dir")
test.assertFalse(p.exists())
test.assertFalse(p.isfile())
test.assertFalse(p.isdir())
test.assertTrue(None == p.created())
test.assertTrue(None == p.modified())
test.assertTrue(None == p.size())
test.assertTrue(None == p.isempty())
test.assertTrue(None == p.isempty())
test.assertTrue("not_a_real_dir" == p.name)
def test_path_4(test):
"""Basic Path usage."""
with test.assertRaises(TypeError):
p = Path()
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeffrimko/crash-model",
"score": 3
}
|
#### File: src/data/propagate_volume.py
```python
import json
import os
import rtree
import argparse
from . import util
from .record import Record
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import geopandas as gpd
from sklearn.neighbors import KNeighborsRegressor
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))))
PROCESSED_DATA_FP = os.path.join(BASE_DIR, 'data/processed')
STANDARDIZED_DATA_FP = os.path.join(BASE_DIR, 'data', 'standardized')
def update_properties(segments, df, features):
"""
Takes a segment list and a dataframe, and writes out updated
intersection and non-intersection segments
Args:
segments - a list of intersection and non-intersection segments
df - a dataframe of features
features - a list of features to extract from the dataframe
Returns:
nothing - writes to inter_segments.geojson and non_inter_segments.geojson
"""
df = df[['id'] + features]
df = df.fillna('')
# a dict where the id is the key and the value is the feature
values = df.to_dict()
id_mapping = {value: key for key, value in values['id'].items()}
for segment in segments:
seg_id = str(segment.properties['id'])
if seg_id in id_mapping:
for feature in features:
if values[feature][id_mapping[seg_id]] == '':
segment.properties[feature] = None
else:
segment.properties[feature] = values[feature][id_mapping[seg_id]]
inters = [x for x in segments if util.is_inter(x.properties['id'])]
non_inters = [x for x in segments if not util.is_inter(x.properties['id'])]
util.write_segments(non_inters, inters, os.path.join(
PROCESSED_DATA_FP, 'maps'))
def read_volume():
"""
Read the standardized volume data, snap to nearest segments,
and read relevant data
Args:
None - reads from file
Returns:
volume - a list of geojson points with volume properties
"""
volume = []
with open(os.path.join(STANDARDIZED_DATA_FP, 'volume.json')) as data_file:
data = json.load(data_file)
for record in data:
if record['location']['longitude'] and record[
'location']['latitude']:
properties = {
'speed': record['speed']['averageSpeed'],
'heavy': record['volume']['totalHeavyVehicles'],
'light': record['volume']['totalLightVehicles'],
'bikes': record['volume']['bikes'],
'volume': record['volume']['totalVolume'],
'orig': record['location']['address']
}
properties['location'] = {
'latitude': float(record['location']['latitude']),
'longitude': float(record['location']['longitude'])
}
record = Record(properties)
volume.append(record)
return [{'point': x.point, 'properties': x.properties} for x in volume]
def propagate_volume():
"""
Propagate volume from given volume data to other segments
Args:
None - reads segment and volume data from file
Returns:
None - writes results to file
"""
# Read in segments
inter = util.read_geojson(os.path.join(
PROCESSED_DATA_FP, 'maps/inters_segments.geojson'))
non_inter = util.read_geojson(
os.path.join(PROCESSED_DATA_FP, 'maps/non_inters_segments.geojson'))
print("Read in {} intersection, {} non-intersection segments".format(
len(inter), len(non_inter)))
# Combine inter + non_inter
combined_seg = inter + non_inter
# Create spatial index for quick lookup
segments_index = rtree.index.Index()
for idx, element in enumerate(combined_seg):
segments_index.insert(idx, element.geometry.bounds)
print('Created spatial index')
volume = read_volume()
# Find nearest atr - 20 tolerance
print("Snapping atr to segments")
util.find_nearest(volume, combined_seg, segments_index, 20)
# Should deprecate once imputed atrs are used, but for the moment
# this is needed for make_canon_dataset
with open(os.path.join(PROCESSED_DATA_FP, 'snapped_atrs.json'), 'w') as f:
json.dump([x['properties'] for x in volume], f)
volume_df = json_normalize(volume)
volume_df = volume_df[[
'properties.near_id',
'properties.heavy',
'properties.light',
'properties.bikes',
'properties.speed',
'properties.volume'
]]
# rename columns
volume_df.columns = [
'id',
'heavy',
'light',
'bikes',
'speed',
'volume'
]
# remove atrs that didn't bind to a segment
before = len(volume_df)
volume_df = volume_df[volume_df['id'] != '']
after = len(volume_df)
print(('Removed {} volume(s) that did not bind to a segment'.format(
before-after)))
# change dtypes
volume_df['id'] = volume_df['id']
volume_df['heavy'] = volume_df['heavy'].astype(int)
volume_df['light'] = volume_df['light'].astype(int)
volume_df['bikes'] = volume_df['bikes'].astype(int)
volume_df['speed'] = volume_df['speed'].astype(int)
volume_df['volume'] = volume_df['volume'].astype(int)
# remove ATRs that bound to same segment
volume_df.drop_duplicates('id', inplace=True)
print(('Dropped {} volumes that bound to same segment as another'.format(
after - len(volume_df))))
# create dataframe of all segments
seg_df = pd.DataFrame([(x.geometry, x.properties) for x in combined_seg])
seg_df.columns = ['geometry', 'seg_id']
# seg_id column is read in as a dictionary
# separate, get `id` value, and rejoin
seg_df = pd.concat([seg_df['geometry'], seg_df.seg_id.apply(
pd.Series)['id']], axis=1)
# change to geo df for centroid method
seg_gdf = gpd.GeoDataFrame(seg_df['id'], geometry=seg_df['geometry'])
# create two columns for x and y of centroid of segment
seg_gdf['px'] = seg_gdf.geometry.centroid.apply(lambda p: p.x)
seg_gdf['py'] = seg_gdf.geometry.centroid.apply(lambda p: p.y)
# merge atrs and seg_gdf
merged_df = pd.merge(seg_gdf, volume_df, on='id', how='left')
print(('Length of merged: {}, Length of seg_gdf: {}'.format(
len(merged_df), len(seg_gdf))))
# values to run KNN on
col_to_predict = ['heavy', 'light', 'bikes', 'speed', 'volume']
for col in col_to_predict:
print(('Predicting missing values for {} column'.format(col)))
# split into X, y, X_pred for KNN regression
X = merged_df[merged_df[col].notnull()][['px', 'py']]
y = merged_df[merged_df[col].notnull()][col]
X_pred = merged_df[merged_df[col].isnull()][['px', 'py']]
# predict on missing segments
knn = KNeighborsRegressor(3, weights='distance')
y_pred = knn.fit(X, y).predict(X_pred)
# create empty column to fill with predicted values
col_name = col + '_pred'
merged_df[col_name] = np.nan
merged_df.loc[merged_df[col].isnull(), col_name] = y_pred
# coalesce all columns
print('Creating coalesced columns')
for col in col_to_predict:
col_name = col + '_coalesced'
pred_col = col + '_pred'
merged_df[col_name] = merged_df[col].combine_first(merged_df[pred_col])
# drop predicted columns
pred_cols = [col + '_pred' for col in col_to_predict]
# drop
merged_df.drop(labels=pred_cols, axis=1, inplace=True)
# in pandas 0.21.0 do:
# merged_df.drop(columns=pred_cols, axis=1, inplace=True)
# write to csv
print('Writing to CSV')
output_fp = os.path.join(PROCESSED_DATA_FP, 'atrs_predicted.csv')
# force id into string
merged_df['id'] = merged_df['id'].astype(str)
merged_df.to_csv(output_fp, index=False)
update_properties(
combined_seg,
merged_df,
['volume', 'speed', 'volume_coalesced', 'speed_coalesced']
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--datadir", type=str,
help="Can give alternate data directory.")
# Can force update
parser.add_argument('--forceupdate', action='store_true',
help='Whether force update the maps')
args = parser.parse_args()
if args.datadir:
PROCESSED_DATA_FP = os.path.join(args.datadir, 'processed')
STANDARDIZED_DATA_FP = os.path.join(args.datadir, 'standardized')
if not os.path.exists(os.path.join(STANDARDIZED_DATA_FP, 'volume.json')):
print("No volumes found, skipping...")
sys.exit()
propagate_volume()
```
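The per-column imputation loop above boils down to fitting a distance-weighted KNN regressor on segments that have a measured value and predicting the rest from segment centroids; a self-contained sketch with toy data follows (the column names are examples only).
```python
# Illustrative sketch of the imputation step used above: fit a
# distance-weighted KNN regressor on rows with a value and predict the rest.
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsRegressor

df = pd.DataFrame({
    "px": [0.0, 1.0, 2.0, 3.0, 4.0],
    "py": [0.0, 1.0, 0.0, 1.0, 0.0],
    "volume": [100.0, np.nan, 300.0, np.nan, 500.0],
})

known = df[df["volume"].notnull()]
missing = df[df["volume"].isnull()]

knn = KNeighborsRegressor(n_neighbors=3, weights="distance")
df.loc[df["volume"].isnull(), "volume_pred"] = knn.fit(
    known[["px", "py"]], known["volume"]).predict(missing[["px", "py"]])

# Coalesce: keep measured values, fall back to predictions.
df["volume_coalesced"] = df["volume"].combine_first(df["volume_pred"])
print(df)
```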
|
{
"source": "jeffrimko/Doctrine",
"score": 2
}
|
#### File: Doctrine/app/doctrine.py
```python
import fnmatch
import os
import shutil
import sys
import tempfile
import time
import uuid
import webbrowser
import zipfile
import os.path as op
from ctypes import *
import PySide
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtWebKit import *
from asciidocapi import AsciiDocAPI
import doctview
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
# Set up the Asciidoc environment.
os.environ['ASCIIDOC_PY'] = op.join(op.dirname(__file__), r"asciidoc\asciidoc.py")
if getattr(sys, 'frozen', None):
os.environ['ASCIIDOC_PY'] = op.normpath(op.join(sys._MEIPASS, r"asciidoc\asciidoc.py"))
# Splash displayed at startup.
SPLASH = r"static\splash.html"
if getattr(sys, 'frozen', None):
SPLASH = op.join(sys._MEIPASS, r"static\splash.html")
SPLASH = QUrl().fromLocalFile(op.abspath(SPLASH))
# Render page.
RENDER = r"static\render.html"
if getattr(sys, 'frozen', None):
RENDER = op.join(sys._MEIPASS, r"static\render.html")
RENDER = QUrl().fromLocalFile(op.abspath(RENDER))
# Prefix of the generated HTML document.
DOCPRE = "__doctrine-"
# Extension of the generated HTML document.
DOCEXT = ".html"
# URL prefix of a local file.
URLFILE = "file:///"
# Name of archive info file.
ARCINFO = "__archive_info__.txt"
# Name and version of the application.
NAMEVER = "Doctrine 0.1.0-alpha"
FILETYPES = dict()
FILETYPES['AsciiDoc'] = ["*.txt", "*.ad", "*.adoc", "*.asciidoc"]
FILETYPES['Zip File'] = ["*.zip"]
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class DoctrineApp(QApplication):
"""The main Doctrine application."""
def __init__(self, *args, **kwargs):
"""Initializes the application."""
super(DoctrineApp, self).__init__(*args, **kwargs)
self.aboutToQuit.connect(self._handle_quit)
self._init_ui()
self.deldoc = False
self.docpath = None
#: Path to the temporary rendered document.
self.tmppath = None
#: Path to a temporary directory, if needed.
self.tmpdir = None
def _init_ui(self):
"""Initializes the UI."""
# Set up palette.
pal = self.palette()
col = pal.color(QPalette.Highlight)
pal.setColor(QPalette.Inactive, QPalette.Highlight, col)
col = pal.color(QPalette.HighlightedText)
pal.setColor(QPalette.Inactive, QPalette.HighlightedText, col)
self.setPalette(pal)
# Set up basic UI elements.
self.mainwin = doctview.MainWindow()
self.mainwin.setWindowTitle(NAMEVER)
self.mainwin.actn_reload.setDisabled(True)
self.mainwin.actn_display.setDisabled(True)
self.mainwin.menu_navi.setDisabled(True)
# Set up event handling.
self.mainwin.actn_open.triggered.connect(self._handle_open)
self.mainwin.actn_quit.triggered.connect(self.quit)
self.mainwin.actn_reload.triggered.connect(self._handle_reload)
self.mainwin.actn_frwd.triggered.connect(self._handle_nav_forward)
self.mainwin.actn_back.triggered.connect(self._handle_nav_backward)
self.mainwin.actn_display.triggered.connect(self._handle_display)
self.mainwin.webview.view.linkClicked.connect(self._handle_link)
self.mainwin.webview.view.setAcceptDrops(True)
self.mainwin.webview.view.dragEnterEvent = self._handle_drag
self.mainwin.webview.view.dropEvent = self._handle_drop
self.mainwin.find_dlog.find_btn.clicked.connect(self._handle_find_next)
self.mainwin.find_dlog.prev_btn.clicked.connect(self._handle_find_prev)
# Set up how web links are handled.
self.mainwin.webview.view.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
# Set up keyboard shortcuts.
scut_reload = QShortcut(self.mainwin)
scut_reload.setKey(QKeySequence("F5"))
scut_reload.activated.connect(self._handle_reload)
scut_find1 = QShortcut(self.mainwin)
scut_find1.setKey(QKeySequence("F3"))
scut_find1.activated.connect(self._display_find)
scut_find2 = QShortcut(self.mainwin)
scut_find2.setKey(QKeySequence("Ctrl+F"))
scut_find2.activated.connect(self._display_find)
scut_find_next = QShortcut(self.mainwin)
scut_find_next.setKey(QKeySequence("Ctrl+N"))
scut_find_next.activated.connect(self._handle_find_next)
scut_find_prev = QShortcut(self.mainwin)
scut_find_prev.setKey(QKeySequence("Ctrl+P"))
scut_find_prev.activated.connect(self._handle_find_prev)
# NOTE: Use to create custom context menu.
self.mainwin.webview.view.contextMenuEvent = self._handle_context
self.mainwin.webview.view.mouseReleaseEvent = self._handle_mouse
def _handle_nav_forward(self):
"""Navigates the web view forward."""
self.mainwin.webview.view.page().triggerAction(QWebPage.Forward)
def _handle_nav_backward(self):
"""Navigates the web view back."""
self.mainwin.webview.view.page().triggerAction(QWebPage.Back)
def _handle_find_next(self, event=None):
"""Find the next occurrence of the phrase in the find dialog."""
self._find()
def _handle_find_prev(self, event=None):
"""Find the previous occurrence of the phrase in the find dialog."""
options = QWebPage.FindBackward
self._find(options)
def _find(self, options=0):
"""Find the phrase in the find dialog."""
text = self.mainwin.find_dlog.find_edit.text()
if self.mainwin.find_dlog.case_cb.checkState():
options |= QWebPage.FindCaseSensitively
self.mainwin.webview.view.findText(text, options=options)
def _handle_mouse(self, event=None):
"""Handles mouse release events."""
if event.button() == Qt.MouseButton.XButton1:
self._handle_nav_backward()
return
if event.button() == Qt.MouseButton.XButton2:
self._handle_nav_forward()
return
return QWebView.mouseReleaseEvent(self.mainwin.webview.view, event)
def _handle_context(self, event=None):
"""Handles context menu creation events."""
if self.docpath:
menu = QMenu()
menu.addAction(self.mainwin.webview.style().standardIcon(QStyle.SP_BrowserReload), "Reload", self._handle_reload)
menu.exec_(event.globalPos())
def _handle_drag(self, event=None):
"""Handles drag enter events."""
event.accept()
def _handle_drop(self, event=None):
"""Handles drag-and-drop events."""
if event.mimeData().hasUrls():
self._load_doc(str(event.mimeData().urls()[0].toLocalFile()))
def _handle_quit(self):
"""Handles quitting the application."""
self._delete_tmppath()
self._delete_tmpdir()
def _handle_display(self):
"""Handles displaying the document in the web view."""
if not self.docpath:
return
if not self.tmppath:
self._load_doc(reload_=True)
if not self.tmppath:
return
webbrowser.open(self.tmppath)
def _handle_reload(self):
"""Handles reloading the document."""
if self.docpath:
self._load_doc(reload_=True)
def _display_find(self):
"""Displays the find dialog."""
self.mainwin.find_dlog.show()
self.mainwin.find_dlog.activateWindow()
self.mainwin.find_dlog.find_edit.setFocus()
def _handle_link(self, url=None):
"""Handles link clicked events."""
# Open URLs to webpages with default browser.
if is_webpage(url):
webbrowser.open(str(url.toString()))
return
# Open links to Asciidoc files in Doctrine.
if is_asciidoc(url2path(url)):
self._load_doc(url2path(url))
return
# Open the URL in the webview.
self.mainwin.webview.view.load(url)
def _handle_open(self):
"""Handles open file menu events."""
path = self.mainwin.show_open_file(format_filter(FILETYPES))
self._load_doc(path)
def _load_doc(self, path="", reload_=False):
"""Handles loading the document to view."""
# Delete existing temp files.
self._delete_tmppath()
self._delete_tmpdir()
# If not reloading the previous document, clear out tmppath.
if not reload_:
self.tmppath = None
self.tmpdir = None
# Set the doc path.
prev = self.docpath
if path:
self.docpath = path
if not self.docpath:
return
self.docpath = op.abspath(self.docpath)
self.setOverrideCursor(QCursor(Qt.WaitCursor))
# Attempt to prepare the document for display.
url = ""
if self.docpath.endswith(".txt"):
url = self._prep_text()
elif self.docpath.endswith(".zip"):
url = self._prep_archive()
elif self.docpath.endswith(".csv"):
url = self._prep_csv()
# NOTE: URL is populated only if ready to display output.
if url:
self.mainwin.webview.view.load(url)
self.mainwin.actn_reload.setDisabled(False)
self.mainwin.actn_display.setDisabled(False)
self.mainwin.menu_navi.setDisabled(False)
self.mainwin.setWindowTitle("%s (%s) - %s" % (
op.basename(self.docpath),
op.dirname(self.docpath),
NAMEVER))
elif prev:
self.docpath = prev
self.restoreOverrideCursor()
def _prep_text(self):
"""Prepares a text document for viewing."""
if not self.docpath:
return
if not self.tmppath:
self.tmppath = getuniqname(op.dirname(self.docpath), DOCEXT, DOCPRE)
try:
AsciiDocAPI().execute(self.docpath, self.tmppath)
except:
self.restoreOverrideCursor()
err_msg = str(sys.exc_info()[0])
err_msg += "\n"
err_msg += str(sys.exc_info()[1])
self.mainwin.show_error_msg(err_msg)
return QUrl().fromLocalFile(self.tmppath)
def _prep_archive(self):
"""Prepares an archive for viewing."""
if not self.docpath:
return
if not self.tmpdir:
self.tmpdir = tempfile.mkdtemp()
if self.tmpdir and not op.isdir(self.tmpdir):
os.makedirs(self.tmpdir)
zfile = zipfile.ZipFile(self.docpath)
zfile.extractall(self.tmpdir)
path = ""
# Attempt to locate archive info file.
arcinfo = op.join(self.tmpdir, ARCINFO)
if op.exists(arcinfo):
path = arcinfo
# If no archive info file found, attempt to locate any asciidoc text file.
if not path:
txts = findfile("*.txt", self.tmpdir)
if txts:
path = txts[0]
# If no text file path was found, bail.
if not path:
return
if not self.tmppath:
self.tmppath = getuniqname(op.dirname(path), DOCEXT, DOCPRE)
AsciiDocAPI().execute(path, self.tmppath)
return QUrl().fromLocalFile(self.tmppath)
def _prep_csv(self):
"""Prepares a CSV file for viewing."""
if not self.docpath:
return
if not self.tmppath:
self.tmppath = getuniqname(op.dirname(self.docpath), DOCEXT, DOCPRE)
path = getuniqname(op.dirname(self.docpath), ".txt", "__temp-")
with open(path, "w") as f:
f.write('[format="csv"]\n')
f.write("|===\n")
f.write("include::" + self.docpath + "[]\n")
f.write("|===\n")
AsciiDocAPI().execute(path, self.tmppath)
os.remove(path)
return QUrl().fromLocalFile(self.tmppath)
def _delete_tmppath(self):
"""Deletes the rendered HTML."""
if not self.tmppath:
return
retries = 3
while retries:
if not op.exists(self.tmppath):
return
try:
os.remove(self.tmppath)
except:
time.sleep(0.1)
retries -= 1
def _delete_tmpdir(self):
"""Deletes the temporary directory."""
if not self.tmpdir:
return
if op.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def show_main(self):
"""Shows the main view of the application."""
self.mainwin.show()
self.mainwin.webview.view.load(SPLASH)
def run_loop(self):
"""Runs the main loop of the application."""
if self.docpath:
self._load_doc()
self.exec_()
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def getuniqname(base, ext, pre=""):
"""Returns a unique random file name at the given base directory. Does not
create a file."""
while True:
uniq = op.join(base, pre + "tmp" + str(uuid.uuid4())[:6] + ext)
if not os.path.exists(uniq):
break
return op.normpath(uniq)
def is_webpage(url):
"""Returns true if the given URL is for a webpage (rather than a local file)."""
# Handle types.
url = url2str(url)
if type(url) != str:
return False
# Return true if URL is external webpage, false otherwise.
if url.startswith("http:") or url.startswith("https:"):
return True
return False
def findfile(pattern, path):
"""Finds a file matching the given pattern in the given path. Taken from
`http://stackoverflow.com/questions/1724693/find-a-file-in-python`."""
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(op.join(root, name))
return result
def url2path(url):
"""Returns the normalized path of the given URL."""
url = url2str(url)
if url.startswith(URLFILE):
url = url[len(URLFILE):]
return op.normpath(url)
def url2str(url):
"""Returns given URL as a string."""
if type(url) == PySide.QtCore.QUrl:
url = str(url.toString())
return url
def is_asciidoc(path):
"""Returns true if the given path is an Asciidoc file."""
# NOTE: Only checking the extension for now.
if path.endswith(".txt"):
return True
return False
def format_filter(filetypes):
"""Returns a filetype filter formatted for the Open File prompt."""
filt = ""
for t in sorted(filetypes, key=lambda key: filetypes[key]):
filt += "%s (" % (t)
filt += " ".join(e for e in filetypes[t])
filt += ");;"
return filt.strip(";;")
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
# Show the main application.
app = DoctrineApp(sys.argv)
if len(sys.argv) > 1 and op.isfile(sys.argv[1]):
app.docpath = str(sys.argv[1])
app.show_main()
app.run_loop()
```
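For reference, `_prep_csv()` wraps the CSV in a small AsciiDoc table before rendering; the sketch below shows the generated wrapper text (the CSV path is a placeholder).
```python
# Sketch of the temporary AsciiDoc wrapper that _prep_csv() writes around a
# CSV file before handing it to AsciiDoc; "data.csv" is a placeholder path.
csv_path = "data.csv"
wrapper = (
    '[format="csv"]\n'
    "|===\n"
    "include::" + csv_path + "[]\n"
    "|===\n"
)
print(wrapper)
```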
|
{
"source": "jeffrimko/FlaskVue-SimpleDemo",
"score": 2
}
|
#### File: jeffrimko/FlaskVue-SimpleDemo/_Run.py
```python
import time
import webbrowser
from qprompt import alert, warn
from auxly.shell import start
from auxly.filesys import Cwd
from ubuild import menu, main
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
APPADDRPORT = "127.0.0.1:5000"
TESTSERVER = None
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def is_running():
global TESTSERVER
return TESTSERVER is not None
@menu
def run():
global TESTSERVER
if is_running():
warn("App server already running!")
return
with Cwd("app"):
TESTSERVER = start("python app.py", "__temp-flask.log")
alert("Application starting...")
time.sleep(3)
if TESTSERVER.isrunning():
alert("Application started.")
browse()
else:
warn("Issue starting application!")
stop()
@menu
def browse():
if not is_running():
warn("Application not running!")
return
webbrowser.open(f"http://{APPADDRPORT}")
@menu
def stop():
global TESTSERVER
if not is_running():
warn("Application not running!")
return
TESTSERVER.stop()
TESTSERVER = None
alert("Application stopped.")
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
main(default="r")
```
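The start/browse/stop flow above relies on `auxly.shell.start`; a rough standard-library equivalent of the same pattern is sketched below (an assumption for illustration only — the real auxly API may differ).
```python
# Rough equivalent of the start/stop pattern above using subprocess instead
# of auxly.shell (illustrative; assumes an app/app.py exists as in the repo).
import subprocess, time, webbrowser

log = open("__temp-flask.log", "w")
proc = subprocess.Popen(["python", "app.py"], cwd="app",
                        stdout=log, stderr=subprocess.STDOUT)
time.sleep(3)
if proc.poll() is None:          # still running
    webbrowser.open("http://127.0.0.1:5000")
else:
    print("Issue starting application!")
proc.terminate()
log.close()
```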
|
{
"source": "jeffrimko/Jupylib",
"score": 2
}
|
#### File: Jupylib/lib/_Install_Package.py
```python
import os
import subprocess
import sys
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def generate_readme():
subprocess.call("asciidoctor -b docbook ../README.adoc", shell=True)
subprocess.call("pandoc -r docbook -w rst -o README.rst ../README.xml", shell=True)
os.remove("../README.xml")
def cleanup_readme():
os.remove("README.rst")
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
generate_readme()
if len(sys.argv) > 1 and sys.argv[1] == "generate_readme":
exit()
subprocess.call("python setup.py install --force", shell=True)
cleanup_readme()
```
|
{
"source": "jeffrimko/PicNotes",
"score": 2
}
|
#### File: jeffrimko/PicNotes/picnotes.py
```python
from datetime import datetime
from tempfile import gettempdir
import os
import os.path as op
import sys
from auxly.stringy import randomize, between
import auxly
import click
import qprompt
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def process_pic_yellow_mask(picpath, cleanup=True):
"""Use this when all the notes use blue text on the rgb(255,255,185)
yellowish background. Results in virtually no extra stuff. Returns the path
to the processed pic."""
unique = randomize()
# Creates black mask areas.
maskpath = op.join(gettempdir(), f"{unique}-mask.png")
cmd = f"magick convert {picpath} -fill white +opaque rgb(255,255,185) -blur 10 -monochrome -morphology open octagon:12 -negate {maskpath}"
auxly.shell.silent(cmd)
# Mask out only notes.
notepath = op.join(gettempdir(), f"{unique}-note.png")
cmd = f"magick convert {picpath} {maskpath} -compose minus -composite {notepath}"
auxly.shell.silent(cmd)
# Finds only text.
textpath = op.join(gettempdir(), f"{unique}-text.png")
cmd = f"magick convert {notepath} +opaque yellow -fill white -monochrome {textpath}"
auxly.shell.silent(cmd)
if cleanup:
auxly.filesys.delete(maskpath)
auxly.filesys.delete(notepath)
return textpath
def format_notes_basic(text):
"""Use this when there is little risk of extra stuff in the processed pic."""
lines = text.strip().splitlines()
notes = " ".join(lines)
return notes
def extract_notes(pngpath):
"""Extracts notes from a pre-processed PNG file. Returns the path of a
text file containing the extracted note text."""
unique = randomize()
# NOTE: The ".txt" extension is added automatically by Tesseract.
txt_noext = op.join(gettempdir(), f"{unique}-note")
auxly.shell.silent(f"tesseract {pngpath} {txt_noext}")
return txt_noext + ".txt"
def scan_notes(picpath, cleanup=True):
"""Attempts to return note text from the given pic."""
pngpath = process_pic_yellow_mask(picpath)
txtpath = extract_notes(pngpath)
text = auxly.filesys.File(txtpath).read()
if cleanup:
auxly.filesys.delete(pngpath)
auxly.filesys.delete(txtpath)
if text:
notes = format_notes_basic(text)
return notes
def create_picnotes(dirpath, confirm=True, shrink=False):
"""Attempts to extract notes from all pics found under the given directory
and write them to a file."""
dirpath = op.abspath(dirpath)
titlepath = os.sep.join(dirpath.split(os.sep)[-2:])
pics = list(auxly.filesys.walkfiles(dirpath, ".(png|jpg)", recurse=True))
if not pics:
qprompt.warn("No pics found under directory!")
return
pics = sort_pics(pics)
qprompt.alert(f"Found {len(pics)} pics found under directory.")
doc = auxly.filesys.File(dirpath, "pic_notes.adoc")
existing_notes = {}
if doc.exists():
existing_notes = parse_picnotes(doc)
if confirm:
if not qprompt.ask_yesno(f"The `pic_notes.adoc` file already exists with {len(existing_notes)} pic notes found, overwrite it?"):
return
doc.empty()
qprompt.alert(f"Initialized file `{doc.path}`")
doc.appendline(f"= PIC NOTES: `{titlepath}`")
doc.appendline(":date: " + datetime.now().strftime("%d %B %Y %I:%M%p"))
doc.appendline(":toc:")
doc.appendline("")
doc.appendline("NOTE: Entries sorted by base filename.")
doc.appendline("")
count = {'reused': 0, 'scanned': 0}
for idx,picpath in enumerate(pics, 1):
relpath = op.relpath(picpath, dirpath)
msg = f"({idx} of {len(pics)})"
if relpath in existing_notes.keys() and auxly.filesys.checksum(picpath) == existing_notes[relpath]['md5']:
qprompt.alert(f"{msg} Reusing `{picpath}`.")
notes = existing_notes[relpath]['note']
if shrink:
attempt_shrink(picpath, notes)
line = format_adoc_line(relpath, picpath, notes)
count['reused'] += 1
else:
notes = qprompt.status(f"{msg} Scanning `{picpath}`...", scan_notes, [picpath]) or "NA"
if shrink:
attempt_shrink(picpath, notes)
line = format_adoc_line(relpath, picpath, notes)
count['scanned'] += 1
doc.appendline(line)
return count
def format_adoc_line(relpath, picpath, notes):
line = ""
line += f"== {relpath}\n"
line += f" - link:{relpath}[window='_blank'] [[md5_{auxly.filesys.checksum(picpath)}]] - {notes}\n"
line += f"+\n"
line += f"link:{relpath}[ image:{relpath}[width=35%] , window='_blank']\n"
return line
def attempt_shrink(picpath, old_notes):
old_size = auxly.filesys.File(picpath).size()
tmppath = op.join(gettempdir(), "__temp-shrink.png")
cmd = f"pngquant --quality=40-60 --output {tmppath} {picpath}"
auxly.shell.silent(cmd)
new_size = auxly.filesys.File(tmppath).size()
if new_size and old_size:
if new_size < old_size:
new_notes = scan_notes(tmppath) or "NA"
if new_notes == old_notes:
if auxly.filesys.move(tmppath, picpath):
qprompt.alert(f"Saved {old_size - new_size} bytes shrinking `{picpath}`.")
return True
qprompt.alert(f"Could not shrink `{picpath}`.")
auxly.filesys.delete(tmppath)
return False
def parse_picnotes(doc):
existing_notes = {}
for line in doc.read().splitlines():
if line.lstrip().startswith("- link:"):
try:
entry = {}
entry['file'] = between(line, " - link:", "[")
entry['md5'] = between(line, "[[md5_", "]]")
entry['note'] = line.split("]] - ")[1]
entry['line'] = line
existing_notes[entry['file']] = entry
except:
pass
return existing_notes
def sort_pics(pics):
sorted_pics = sorted(pics, key=lambda p: op.basename(p))
return sorted_pics
@click.group()
def cli(**kwargs):
return True
@cli.command()
@click.option("--scandir", default=".", show_default=True, help="Directory to scan and create notes in.")
@click.option("--picdirname", default="pics", show_default=True, help="Name of pic directories.")
@click.option("--overwrite", is_flag=True, help="Always overwrite existing notes.")
@click.option("--shrink", is_flag=True, help="Attempt to shrink size of pics.")
def scan(scandir, picdirname, overwrite, shrink):
"""Scan scandir and all subdirectories for pics. Note text will be
extracted and a notes file will be created under scandir."""
dirpath = auxly.filesys.Path(scandir)
if not dirpath.isdir():
qprompt.fatal("Given path must be existing directory!")
if picdirname != dirpath.name:
if not qprompt.ask_yesno("Directory not named `pics`, continue?"):
sys.exit()
create_picnotes(dirpath, confirm=not overwrite, shrink=shrink)
@cli.command()
@click.option("--startdir", default=".", show_default=True, help="The walk start directory.")
@click.option("--picdirname", default="pics", show_default=True, help="Name of pic directories.")
def walk(startdir, picdirname):
"""Walk all directories under startdir and scan when directory name matches
picdirname. Existing notes are overwritten."""
if not op.isdir(startdir):
qprompt.fatal("Given path must be existing directory!")
total_count = {'reused': 0, 'scanned': 0}
for d in auxly.filesys.walkdirs(startdir, picdirname):
if op.basename(d) != picdirname:
continue
qprompt.hrule()
qprompt.alert(f"Walking through `{d}`...")
dirpath = auxly.filesys.Path(d)
new_count = create_picnotes(dirpath, False)
if new_count:
total_count['reused'] += new_count['reused']
total_count['scanned'] += new_count['scanned']
qprompt.alert(f"Walk complete, scanned={total_count['scanned']} reused={total_count['reused']}")
sys.exit()
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
cli(obj={})
```
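The notes file round-trips through the line format produced by `format_adoc_line()` and parsed back by `parse_picnotes()`; the standalone sketch below re-implements `between` only for the example (PicNotes uses `auxly.stringy.between`), with a made-up path, hash, and note.
```python
# Illustrative round trip of the note line format used above.
def between(text, start, end):
    # Minimal stand-in for auxly.stringy.between used only in this example.
    return text.split(start, 1)[1].split(end, 1)[0]

line = " - link:pics/example.png[window='_blank'] [[md5_abc123]] - Example note text"
entry = {
    "file": between(line, " - link:", "["),
    "md5": between(line, "[[md5_", "]]"),
    "note": line.split("]] - ")[1],
}
print(entry)  # {'file': 'pics/example.png', 'md5': 'abc123', 'note': 'Example note text'}
```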
|
{
"source": "jeffrimko/PopPage",
"score": 2
}
|
#### File: PopPage/app/gitr.py
```python
import io
import sys
import os.path as op
import auxly.filesys as fsys
import qprompt
import requests
##==============================================================#
## SECTION: Setup #
##==============================================================#
# Handle Python 2/3 differences.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding("utf-8")
from urllib import unquote
else:
from urllib.parse import unquote
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
GHAPI = "https://api.github.com/repos/"
GHURL = "https://github.com/"
GHRAW = "https://raw.githubusercontent.com/"
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def is_github(src):
if src.startswith(GHAPI):
return True
if src.startswith(GHURL):
return True
if src.startswith(GHRAW):
return True
return False
def prep_url(url):
"""Preps the given URL and returns either an API URL (for directories) or a
raw content URL (for files)."""
if url.startswith(GHURL):
tok = url.split("/")[3:]
if len(tok) > 4:
name = tok[-1]
else:
name = tok[1]
if 2 == len(tok):
tok.append("tree")
if 3 == len(tok):
tok.append("master")
if "blob" == tok[2]:
url = GHRAW
url += "{0}/{1}/{3}/".format(*tok)
url += "/".join(tok[4:])
elif "tree" == tok[2]:
url = GHAPI
url += "{0}/{1}/contents/".format(*tok)
url += "/".join(tok[4:])
url += "?ref=" + tok[3]
else:
tok = url.split("/")
name = tok[-1]
return url,name
def is_file(url):
"""Checks if the given URL is for a file and returns the filename if so;
returns None otherwise."""
url,name = prep_url(url)
if url.startswith(GHRAW):
return unquote(name)
def is_dir(url):
if is_file(url):
return None
return unquote(url.split("/")[-1])
def download(srcurl, dstpath=None):
"""Handles downloading files/dirs from the given GitHub repo URL to the
given destination path."""
def download_api(srcurl, dstdir):
items = requests.get(srcurl).json()
if op.isfile(dstdir):
raise Exception("DestDirIsFile")
fsys.makedirs(dstdir, ignore_extsep=True)
if isinstance(items, dict) and "message" in items.keys():
qprompt.error(items['message'])
return
for item in items:
if "file" == item['type']:
fpath = op.join(dstdir, item['name'])
with io.open(fpath, "w", encoding="utf-8") as fo:
text = requests.get(item['download_url']).text
fo.write(text)
else:
download_api(item['url'], op.join(dstdir, item['name']))
def download_raw(srcurl, dstfile):
fsys.makedirs(dstfile)
if op.isdir(dstfile):
dstfile = op.join(dstfile, srcurl.split("/")[-1])
dstfile = unquote(dstfile)
with io.open(dstfile, "w") as fo:
fo.write(requests.get(srcurl).text)
url,name = prep_url(srcurl)
if not dstpath:
dstpath = op.join(op.abspath("."), name)
dstpath = op.abspath(dstpath)
if url.startswith(GHAPI):
download_api(url, dstpath)
else:
download_raw(url, dstpath)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
pass
```
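Reading `prep_url()` above, a GitHub web URL is rewritten either to a raw-content URL (files) or an API contents URL (directories); the mappings below are illustrative, with placeholder owner/repo/paths, and the snippet also shows the URL decoding applied to templated file names.
```python
# Expected mappings derived from prep_url() above (illustrative placeholders):
#
#   https://github.com/owner/repo/blob/master/docs/readme.txt
#       -> https://raw.githubusercontent.com/owner/repo/master/docs/readme.txt  (raw file)
#   https://github.com/owner/repo/tree/master/docs
#       -> https://api.github.com/repos/owner/repo/contents/docs?ref=master     (API dir listing)
#
from urllib.parse import unquote  # decodes %7B%7B... template names as in is_file()
print(unquote("%7B%7Bcookiecutter.repo_name%7D%7D"))  # {{cookiecutter.repo_name}}
```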
#### File: PopPage/app/poppage.py
```python
import collections
import io
import os
import os.path as op
import random
import sys
import tempfile
from string import ascii_lowercase
from pprint import pprint
import auxly.filesys as fsys
import auxly.shell as sh
import qprompt
from binaryornot.check import is_binary
from docopt import docopt
from jinja2 import FileSystemLoader, Template, Undefined, meta
from jinja2.environment import Environment
from jinja2schema import infer, model
import gitr
import utilconf
##==============================================================#
## SECTION: Setup #
##==============================================================#
# Handle Python 2/3 differences.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding("utf-8")
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
#: Application version string.
__version__ = "0.8.1"
#: Key separator.
KEYSEP = "::"
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class SkipUndefined(Undefined):
def _fail_with_undefined_error(self, *args, **kwargs):
return None
def __getitem__(self, key):
return self
def __getattr__(self, key):
return self
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
#: Random lowercase string of length x.
_getrands = lambda x: "".join(random.choice(ascii_lowercase) for _ in range(x))
def handle_paths(**dkwargs):
def wrap(func):
def handler(inpath, outpath):
if gitr.is_github(inpath):
tpath = tempfile.mkdtemp(prefix="poppage-")
gitr.download(inpath, tpath)
fname = gitr.is_file(inpath)
dname = gitr.is_dir(inpath)
if outpath is None:
outpath = os.getcwd()
if fname:
outpath = op.join(outpath, op.basename(fname))
if dname:
outpath = op.join(outpath, op.basename(dname))
if fname:
return op.join(tpath, op.basename(fname)), outpath, tpath
return tpath, outpath, tpath
return inpath, outpath, None
def inner(*fargs, **fkwargs):
inpath = fkwargs.get('inpath') or (
fargs[dkwargs['inpath']] if (
'inpath' in dkwargs.keys() and dkwargs['inpath'] < len(fargs))
else None)
outpath = fkwargs.get('outpath') or (
fargs[dkwargs['outpath']] if (
'outpath' in dkwargs.keys() and dkwargs['outpath'] < len(fargs))
else None)
inpath, outpath, to_delete = handler(inpath, outpath)
fargs = list(fargs)
# The following tries to intelligently handle function arguments so
# that this decorator can be generalized. Need to handle conditions
# where arguments may be positional or keyword.
for var,key in ((inpath,"inpath"),(outpath,"outpath")):
if key in dkwargs.keys():
if not var:
continue
if key in fkwargs.keys():
fkwargs[key] = var
elif dkwargs[key] < len(fargs):
fargs[dkwargs[key]] = var
else:
fkwargs[key] = var
if to_delete:
if op.isdir(inpath):
# If using a temporary directory to hold the template (e.g.
# downloaded from Github), don't include that directory
# name in the output paths.
fkwargs['pathsubs'] = [[op.basename(to_delete), "."]]
retval = func(*fargs, **fkwargs)
if to_delete:
fsys.delete(to_delete)
return retval
return inner
return wrap
def check_template(tmplstr, tmpldict=None):
"""Checks the given template string against the given template variable
dictionary. Returns a list of variables not provided in the given
dictionary."""
def check_tmplitems(items, tmpldict, topkey=""):
missing = []
for key,val in items:
if type(val) == model.Dictionary:
missing += check_tmplitems(
val.items(),
tmpldict.get(key, {}),
key if not topkey else "%s%s%s" % (topkey, KEYSEP, key))
else:
name = key if not topkey else "%s%s%s" % (topkey, KEYSEP, key)
try:
if key not in tmpldict.keys():
missing.append(name)
except:
qprompt.warn("Issue checking var `%s`!" % (name))
return missing
tmpldict = tmpldict or {}
try:
missing = check_tmplitems(infer(tmplstr).items(), tmpldict)
except:
missing = []
return missing
def render_str(tmplstr, tmpldict, bail_miss=False):
"""Renders the given template string using the given template variable
dictionary. Returns the rendered text as a string."""
env = Environment(undefined=SkipUndefined, extensions=['jinja2_time.TimeExtension'])
env.trim_blocks = True
env.lstrip_blocks = True
miss = check_template(tmplstr, tmpldict)
if miss:
qprompt.warn("Template vars `%s` were not supplied values!" % (
"/".join(miss)))
return env.from_string(tmplstr).render(**tmpldict)
def render_file(tmplpath, tmpldict, bail_miss=False):
"""Renders the template file and the given path using the given template
variable dictionary. Returns the rendered text as a string."""
tmplpath = op.abspath(tmplpath)
env = Environment(undefined=SkipUndefined, extensions=['jinja2_time.TimeExtension'])
env.trim_blocks = True
env.lstrip_blocks = True
env.keep_trailing_newline = True
for encoding in ["utf-8", "mbcs"]:
try:
env.loader = FileSystemLoader(op.dirname(tmplpath), encoding=encoding)
tmpl = env.get_template(op.basename(tmplpath))
break
except UnicodeDecodeError:
qprompt.warn("Issue while decoding template with `%s`!" % encoding)
else:
qprompt.fatal("Unknown issue while loading template!")
with io.open(tmplpath) as fo:
tmplstr = fo.read()
miss = check_template(tmplstr, tmpldict)
if miss:
qprompt.warn("Template vars `%s` in `%s` were not supplied values!" % (
"/".join(miss),
op.basename(tmplpath)))
return tmpl.render(**tmpldict)
@handle_paths(inpath=0,outpath=2)
def make(inpath, tmpldict, outpath=None, **kwargs):
"""Generates a file or directory based on the given input
template/dictionary."""
if op.isfile(inpath):
return make_file(inpath, tmpldict, outpath=outpath, **kwargs)
else:
return make_dir(inpath, tmpldict, outpath=outpath, **kwargs)
def make_file(inpath, tmpldict, outpath=None):
inpath = op.abspath(inpath)
if outpath:
outpath = render_str(outpath, tmpldict)
if op.isdir(outpath):
outpath = op.join(outpath, op.basename(inpath))
outpath = render_str(outpath, tmpldict)
if is_binary(inpath):
qprompt.status("Copying `%s`..." % (outpath), fsys.copy, [inpath,outpath])
return
text = render_file(inpath, tmpldict)
if text is None:
return False
# Handle rendered output.
if outpath:
outpath = op.abspath(outpath)
if inpath == outpath:
qprompt.fatal("Output cannot overwrite input template!")
fsys.makedirs(op.dirname(outpath))
with io.open(outpath, "w", encoding="utf-8") as f:
qprompt.status("Writing `%s`..." % (outpath), f.write, [text])
else:
qprompt.echo(text)
return True
def make_dir(inpath, tmpldict, outpath=None, pathsubs=None):
pathsubs = pathsubs or []
inpath = op.abspath(inpath)
bpath = op.basename(inpath)
if not outpath:
outpath = os.getcwd()
dname = render_str(bpath, tmpldict)
if not dname:
return False
mpath = op.abspath(op.join(outpath, dname))
if not mpath:
return False
for sub in pathsubs:
mpath = mpath.replace(*sub)
if inpath == mpath:
qprompt.fatal("Output cannot overwrite input template!")
mpath = render_str(mpath, tmpldict)
qprompt.status("Making dir `%s`..." % (mpath), fsys.makedirs, [mpath])
# Iterate over files and directories IN PARENT ONLY.
for r,ds,fs in os.walk(inpath):
for f in fs:
ipath = op.join(r,f)
fname = render_str(f, tmpldict)
opath = op.join(mpath, fname)
if not make_file(ipath, tmpldict, opath):
return False
for d in ds:
ipath = op.join(r, d)
if not make_dir(ipath, tmpldict, mpath, pathsubs=pathsubs):
return False
break # Prevents from walking beyond parent.
return True
@handle_paths(inpath=0)
def check(inpath, echo=False, **kwargs):
"""Checks the inpath template for variables."""
tvars = check_template(op.basename(inpath))
if op.isfile(inpath):
if not is_binary(inpath):
with io.open(inpath) as fi:
tvars += check_template(fi.read())
else:
for r,ds,fs in os.walk(inpath):
for x in ds+fs:
xpath = op.join(r,x)
tvars += check(xpath)
tvars = sorted(list(set(tvars)))
if echo:
qprompt.echo("Found variables:")
for var in tvars:
qprompt.echo(" " + var)
return tvars
def run(inpath, tmpldict, outpath=None, execute=None, runargs=None):
"""Handles logic for `run` command."""
if not outpath:
outpath = op.join(os.getcwd(), "__temp-poppage-" + _getrands(6))
make(inpath, tmpldict, outpath=outpath)
qprompt.hrule()
if not execute:
execute = outpath
tmpldict.update({'outpath': outpath})
tmpldict.update({'runargs': " ".join(runargs or [])})
execute = render_str(execute, tmpldict)
for line in execute.splitlines():
sh.call(line.strip())
fsys.delete(outpath)
def main():
"""This function implements the main logic."""
if len(sys.argv) > 1 and op.isfile(sys.argv[1]):
args = {}
args['--defaults'] = sys.argv[1]
args['--file'] = []
args['--keysep'] = "::"
args['--string'] = []
args['PATH'] = []
args['VAL'] = []
args['runargs'] = sys.argv[2:] or ""
else:
args = docopt(__doc__, version="poppage-%s" % (__version__))
utildict, tmpldict = utilconf.parse(args)
# Check required conditions.
if not utildict.get('inpath'):
qprompt.fatal("Must supply INPATH!")
# Handle command.
if utildict['command'] == "check":
check(utildict['inpath'][0], echo=True)
elif utildict['command'] == "make":
for inpath, outpath in zip(utildict['inpath'], utildict['outpath']):
make(inpath, tmpldict, outpath=outpath)
elif utildict['command'] == "run":
run(
utildict['inpath'][0],
tmpldict,
outpath=utildict['outpath'][0],
execute=utildict.get('execute'),
runargs=utildict.get('runargs'))
elif utildict['command'] == "debug":
qprompt.echo("Utility Dictionary:")
pprint(utildict)
qprompt.echo("Template Dictionary:")
pprint(tmpldict)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
main()
```
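The rendering behaviour above hinges on the `SkipUndefined` class, which lets templates render even when some variables are missing; the minimal Jinja2 sketch below shows the idea (omitting the `jinja2_time` extension PopPage also loads).
```python
# Minimal sketch of the lenient rendering used above: missing variables do
# not raise, so partially filled template dictionaries still render.
from jinja2 import Environment, Undefined

class SkipUndefined(Undefined):
    def _fail_with_undefined_error(self, *args, **kwargs):
        return None

env = Environment(undefined=SkipUndefined, trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string("Hello {{ name }}! Project: {{ project }}")
print(tmpl.render(name="World"))  # -> "Hello World! Project: " (missing 'project' renders empty)
```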
#### File: PopPage/tests/cli_debug_test_1.py
```python
from testlib import *
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(BaseTest):
def test_cli_debug_1(test):
errcode = call("defaults/d8.yaml")
test.assertEqual(0, errcode)
def test_cli_debug_2(test):
errcode = call("defaults/d9.yaml")
test.assertEqual(0, errcode)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
```
#### File: PopPage/tests/cookiecutter_test_1.py
```python
from testlib import *
import poppage
from poppage import check, make
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(BaseTest):
def test_cookiecutter_1(test):
"""GitHub check functionality."""
poppage.KEYSEP = "::"
tvars = check("https://github.com/solarnz/cookiecutter-avr")
if not tvars:
print("Skipping check...")
return
evars = []
evars.append("cookiecutter::full_name")
evars.append("cookiecutter::repo_name")
evars.append("cookiecutter::year")
test.assertEqual(sorted(tvars), sorted(evars))
def test_cookiecutter_2(test):
test.assertFalse(op.isfile("MyTest.c"))
url = "https://github.com/solarnz/cookiecutter-avr/blob/master/%7B%7Bcookiecutter.repo_name%7D%7D/%7B%7Bcookiecutter.repo_name%7D%7D.c"
test.assertTrue(make(url, {'cookiecutter':{'repo_name':"MyTest"}}))
test.assertTrue(op.isfile("MyTest.c"))
delete("MyTest.c")
def test_cookiecutter_3(test):
test.assertFalse(op.isfile("AnotherName.c"))
url = "https://github.com/solarnz/cookiecutter-avr/blob/master/%7B%7Bcookiecutter.repo_name%7D%7D/%7B%7Bcookiecutter.repo_name%7D%7D.c"
test.assertTrue(make(url, {'cookiecutter':{'repo_name':"MyTest"}}, "AnotherName.c"))
test.assertTrue(op.isfile("AnotherName.c"))
delete("AnotherName.c")
def test_cookiecutter_4(test):
url = "https://github.com/solarnz/cookiecutter-avr/tree/master/%7B%7Bcookiecutter.repo_name%7D%7D"
if not check(url):
print("Skipping check...")
return
tvars = {'cookiecutter':{}}
tvars['cookiecutter']['full_name'] = "<NAME>"
tvars['cookiecutter']['repo_name'] = "MyRepo"
tvars['cookiecutter']['year'] = 2017
test.assertFalse(op.isdir("MyRepo"))
test.assertTrue(make(url, tvars))
test.assertTrue(op.isdir("MyRepo"))
delete("MyRepo")
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
# unittest.main()
print("SKIPPING TEST FOR NOW.")
```
|
{
"source": "jeffrisandy/image_classifier",
"score": 3
}
|
#### File: image_classifier/test/predict.py
```python
import matplotlib.pyplot as plt
import argparse
import torch
import numpy as np
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from train import build_classifier, build_model
def get_input_args():
"""
5 command line arguments are created:
input - path to image file to predict
checkpoint - path to checkpoint file
top_k - Top k label with most probabilities (default- 1)
cat - path to json file for mapping flower names (default- None)
gpu - select GPU processing (default - False)
Returns:
parse_args() - store data structure
"""
parser = argparse.ArgumentParser(description='Get arguments')
#Define arguments
parser.add_argument('input', type=str, help='image file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file to load')
parser.add_argument('--top_k', default=1, type=int, help='default top_k results')
parser.add_argument('--gpu', default=False, action='store_true', help='use GPU processing')
parser.add_argument('--cat', default='', type=str, help='default category name json file path' )
return parser.parse_args()
def process_image(image):
'''
Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
from PIL import Image
import torch
img = Image.open(image)
#resize to 256 pixels
img = img.resize((256,256))
width, height = img.size
new_width = 224
new_height = 224
# crop center 224x224
left = np.ceil((width - new_width)/2.)
top = np.ceil((height - new_height)/2.)
right = np.floor((width + new_width)/2.)
bottom = np.floor((height + new_height)/2.)
img = img.crop((left,top,right,bottom))
#convert to array --> shape (224,224,3)
img = np.array(img)
#normalizing to range [0,1]
img = img /255.0
#normalizing to specified mean and std
norm_mean = np.array([0.485,0.456,0.406])
norm_std = np.array([0.229, 0.224, 0.225])
img = (img - norm_mean) / norm_std
#return a transpose to shape (3,224,224) for pytorch input
return torch.from_numpy(img.transpose(2,0,1))
def predict(image_path, model, gpu=None, topk=1):
'''
Predict the class (or classes) of an image using a trained deep learning model.
'''
model.eval()
image = process_image(image_path)
image = image.unsqueeze(0).float()
with torch.no_grad():
if gpu and torch.cuda.is_available():
model.cuda()
image = image.cuda()
print('\nGPU processing')
print('===============')
elif gpu and not torch.cuda.is_available():
print('\nGPU is not detected, continue with CPU PROCESSING')
print('==================================================')
else:
print('\nCPU processing')
print('===============')
output = model.forward(image)
ps = torch.exp(output)
probs, classes = torch.topk(ps,topk)
probs = np.array(probs).flatten()
classes = np.array(classes).flatten()
#idx_to_class
idx_to_class = { v:k for k,v in model.class_to_idx.items()}
classes = [idx_to_class[cls] for cls in classes]
return probs, classes
def print_predict( probs, classes,cat_names):
"""
Prints predictions. Returns Nothing
Parameters:
classes - list of predicted classes
probs - list of probabilities
Returns:
None - Use to print predictions
"""
import json
if cat_names:
with open(cat_names, 'r') as f:
cat_to_name = json.load(f)
classes = [cat_to_name[k] for k in classes]
predictions = list(zip(classes, probs))
print("The predicted image is....\n")
for i in range(len(predictions)):
print('{} : {:.3%}'.format(predictions[i][0], predictions[i][1]))
print("\n")
pass
def load_model(checkpoint_path):
"""
Load model. Returns loaded model
Parameters:
checkpoint_path - checkpoint file path
Returns:
model - loaded model
"""
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
input_size = checkpoint['input_size']
hidden_layers = checkpoint['hidden_layers']
output_size = checkpoint['output_size']
arch = checkpoint['arch']
model = build_model(arch, hidden_layers, output_size)
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
#define optimizer and scheduler
if arch == 'resnet152':
optimizer = optim.SGD(model.fc.parameters(), lr=0.01, momentum=0.9)
else:
optimizer = optim.SGD(model.classifier.parameters(), lr=0.01, momentum=0.9)
optimizer.load_state_dict(checkpoint['optimizer'])
model.optimizer = optimizer
model = model.eval()
return model
def main():
in_args = get_input_args()
model = load_model(in_args.checkpoint)
probs, classes = predict(in_args.input, model,in_args.gpu, in_args.top_k)
print_predict(probs, classes, in_args.cat)
pass
if __name__ == '__main__':
main()
```
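The last step of `predict()` maps top-k indices back to class labels by inverting the `class_to_idx` mapping saved with the checkpoint; a small sketch with made-up values follows.
```python
# Small sketch of the class-mapping step in predict(): invert class_to_idx
# and translate top-k indices back into class labels. Values are made up.
import numpy as np

class_to_idx = {"1": 0, "10": 1, "100": 2}           # label -> training index
idx_to_class = {v: k for k, v in class_to_idx.items()}

probs = np.array([0.05, 0.80, 0.15])
topk = 2
top_idx = probs.argsort()[::-1][:topk]               # indices of the k largest probs
top_classes = [idx_to_class[i] for i in top_idx]
print(list(zip(top_classes, probs[top_idx])))        # [('10', 0.8), ('100', 0.15)]
```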
|
{
"source": "jeffrisandy/starbuck_capstone",
"score": 3
}
|
#### File: jeffrisandy/starbuck_capstone/helpers.py
```python
import pandas as pd
import numpy as np
import math
import json
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import datetime
"""
########################
Porfolio preprocessing
########################
"""
def add_portfolio_name(portfolio_df):
"""
INPUT :
portfolio_df : portfolio df
RETURN :
portfolio_df : updated portfolio df with addtional col of name of each offer ids
"""
portfolio_df = portfolio_df.copy()
portfolio_df['name'] = portfolio_df.offer_type.astype(str) + "_" + portfolio_df.difficulty.astype(str) +\
"_" + portfolio_df.reward.astype(str) + \
"_" + portfolio_df.duration.astype(str)
return portfolio_df
def one_hot_channels(portfolio_df):
"""
INPUT :
portfolio_df : portfolio df
RETURN :
portfolio_df : updated portfolio df with addtional columsn of one hot encoded of channels columns
"""
portfolio_df = portfolio_df.copy()
channels = ['web', 'email', 'mobile', 'social']
for channel in channels:
portfolio_df[channel] = portfolio_df.channels.apply(lambda x: channel in x)*1
#drop channels column
portfolio_df = portfolio_df.drop('channels', axis=1)
return portfolio_df
def portfolio_preprocessing(portfolio_df):
"""
INPUT :
portfolio_df : portfolio df
RETURN :
portfolio_df : updated preprocessed portfolio df
"""
portfolio_df = portfolio_df.copy()
# add portfolio add_portfolio_name
portfolio_df = add_portfolio_name(portfolio_df)
# one_hot_channels
portfolio_df = one_hot_channels(portfolio_df)
return portfolio_df
"""
######################
Profile preprocessing
######################
"""
def profile_parse_dates(profile_df):
"""
INPUT :
portfolio_df : profile df with became_member_on as int
RETURN :
profile_df : updated portfolio df with parsed dates as datetime in became_member_on
"""
profile_df = profile_df.copy()
#convert to string
profile_df['became_member_on'] = profile_df.became_member_on.apply(lambda x: str(x))
#convert to datetime format
profile_df['became_member_on'] = pd.to_datetime(profile_df.became_member_on)
return profile_df
"""
########################
Transcript preprocessing
########################
"""
def encode_offer_id(x):
"""
This function return a value of "offer id" or "offer_id"
"""
try :
return x['offer id']
except:
return x['offer_id']
def transcript_encoded(transcript_df):
"""
To encode column :
- event : received, reviewed, completed
- value : offer_id, amount
INPUT :
transcript_df : transcript def
RETURN :
transcript_encoded : encoded transcript df of value column
"""
transcript_encoded = transcript_df.copy()
transcript_encoded['offer_id'] = transcript_df[transcript_df.event.isin(['offer received', 'offer viewed', 'offer completed'])]\
.value.apply(encode_offer_id)
transcript_encoded['amount'] = transcript_df[transcript_df.event.isin(['transaction'])].value.apply(lambda x: x['amount'])
return transcript_encoded
def merge_transcript_profile(transcript_df, profile_df):
"""
This function is to merge profile df to transcript df
INPUT:
transcript_df : transcript df
profile_df : profile df
RETURN :
transcript_profile_df : a merge of transcript and profile df
"""
profile_df = profile_parse_dates(profile_df)
transcript_encoded_df = transcript_encoded(transcript_df)
transcript_profile_df = pd.merge(transcript_encoded_df, profile_df, left_on=['person'],
right_on = ['id'], how ='left')
transcript_profile_df = transcript_profile_df.drop(['id'], axis=1)
return transcript_profile_df
def merge_transcript_profile_portfolio(transcript_df, profile_df, portfolio_df):
"""
This function is to merge profile to a merged df of profile & transcript df
INPUT:
transcript_df : transcript df
profile_df : profile df
portfolio_df : portfolio df
RETURN :
transcript_profile_porto: a merge of transcript and profile, and portfolio df
"""
portfolio_df = portfolio_preprocessing(portfolio_df)
transcript_profile_df = merge_transcript_profile(transcript_df, profile_df)
transcript_profile_porto = pd.merge(transcript_profile_df, portfolio_df, left_on = 'offer_id', right_on ='id', how='left').drop('id', axis=1)
#parse date became_member_on
transcript_profile_porto = profile_parse_dates(transcript_profile_porto)
return transcript_profile_porto
def find_invalid_index(transcript_df, profile_df, portfolio_df):
"""
INPUT : transcript, profile, portfolio dataframe
RETURN : a list of invalid index in transcript dataframe
"""
#merge transcript, profile, and portfolio dataframe
transcript_merge_df = merge_transcript_profile_portfolio(transcript_df, profile_df, portfolio_df)
# list of invalid index of offer completed
invalid_index = []
#iterate over profile_id (person)
for profile_id in tqdm(transcript_merge_df.person.unique()):
# take a subset_df for profile_id person
subset_df = transcript_merge_df[transcript_merge_df.person == profile_id]
# take a subset of 'offer completed'
completed_df = subset_df[subset_df.event == 'offer completed']
# iterate over the completed offer_id
for offer in completed_df.offer_id.unique():
# take a subset df of completed offer
comp = completed_df[completed_df.offer_id == offer]
# estimate the offer received time in hours using the offer duration (in days)
start_time = comp.time.values - (comp.duration.values *24)
# the offer completed time
comp_time = comp.time.values
# take the subset within start_time and comp_time
subset = subset_df[(subset_df.time >= start_time[0]) & (subset_df.time <= comp.time.values[0])]
# take only event of offer viewed for the given offer_id
subset_viewed = subset[(subset.event == 'offer viewed') & ( subset.offer_id == offer)]
# check whether subset_viewed is empty or not, if it is empty then the offer completed is not valid
# because the offer is completed before the customer viewed it,
# it means that the customer was not affected by the portfolio campaign
if subset_viewed.shape[0] == 0 :
invalid_index.extend(comp.index)
return invalid_index
def transcript_cleaning(transcript_df, profile_df, portfolio_df):
"""
INPUT :
transcript_df : transcript df
profile_df : profile df
portfolio_df : portfolio df
RETURN :
transcript_clean_df : a clean transcript df with an additional column marking invalid offer completions
This function will check whether a saved "transcript_clean.csv" is available and use it if so.
Otherwise, the function will continue to execute the next code block to clean the dataframe,
and save a clean transcript df as "transcript_clean.csv".
The function will mark where the invalid offer completed as 1, else 0.
The invalid offer completed is the offer completed when the customer never viewed the offer.
"""
try:
transcript_clean_df = load_file('data/transcript_clean.csv')
print("The transcript_clean.csv and transcript_merge.csv file are available at local folder")
except:
transcript_clean_df = merge_transcript_profile_portfolio(transcript_df, profile_df, portfolio_df)
invalid_index = find_invalid_index(transcript_df, profile_df, portfolio_df)
#marking invalid in transcript_merge_df
transcript_clean_df.loc[transcript_clean_df.index.isin(invalid_index),"invalid"] = 1
transcript_clean_df.loc[~transcript_clean_df.index.isin(invalid_index),"invalid"] = 0
#saving df
transcript_clean_df.to_csv('data/transcript_clean.csv')
return transcript_clean_df
def transcript_preprocessing(transcript_df, profile_df, portfolio_df):
"""
INPUT : transcript_df, profile_df, portfolio_df : DataFrame
RETURN :
transcript_valid_df : transcript df that contains only the valid offers, marked as 0 in the invalid column
transcript_all_df : transcript all df as return by transcript_cleaning function
"""
transcript_all_df = transcript_cleaning(transcript_df, profile_df, portfolio_df)
transcript_valid_df = transcript_all_df[transcript_all_df.invalid == 0]
return transcript_valid_df, transcript_all_df
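# --- Illustrative usage sketch (added for clarity; not part of the original project code) ---
# It assumes the three Starbucks capstone files live under data/ with the names below and
# are line-delimited JSON; adjust the paths to your local layout.
def example_clean_transcript():
    """Hypothetical end-to-end call of the cleaning pipeline defined above."""
    portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
    profile = pd.read_json('data/profile.json', orient='records', lines=True)
    transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
    return transcript_preprocessing(transcript, profile, portfolio)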
def load_file(filepath):
"""Load file csv"""
df_clean = pd.read_csv(filepath)
df_clean = df_clean.set_index(df_clean.columns[0])
df_clean = profile_parse_dates(df_clean)
return df_clean
"""
################################
FEATURES EXTRACTION
################################
"""
def get_response_time(df, profile_id):
"""
INPUT :
df : DataFrame, clean merge transcript df
profile_id : profile id
RETURN :
response_time_series : a Series of response_time of offers for the given profile_id
Response time is calculated as the time delta (in hours) between offer viewed and offer completed
"""
subset_offer_typ = df[df.event == 'offer completed']['name'].unique().tolist()
response_time_series = pd.Series(name=profile_id)
for offer in subset_offer_typ:
completed_time = df[(df.name == offer) & (df.event == 'offer completed')]['time'].values
reviewed_time = df[(df.name == offer) & (df.event == 'offer viewed')]['time'].values
if (completed_time.shape[0] != reviewed_time.shape[0]) and (reviewed_time.shape[0] != 0):
reviewed_time_clean = np.array([])
for t in completed_time:
reviewed_time_clean = np.append(reviewed_time_clean, reviewed_time[reviewed_time <= t].max())
response_time = completed_time - reviewed_time_clean
else:
response_time = completed_time - reviewed_time
response_time = response_time[response_time > 0]
if response_time.shape[0] != 0 :
response_time_avg = response_time.mean()
else:
response_time_avg = np.nan
response_time_series[offer +'_' +'response_time_avg'] = response_time_avg
return response_time_series
def get_spending_series(df, profile_id):
"""
INPUT :
df : DataFrame, clean merge transcript df
profile_id : profile id
RETURN :
spending_series : a Series of spending for a given profile_id (avg, transaction_count, and sum_spending)
"""
avg_spending = df.amount.mean()
transaction_count = df.amount.count()
sum_spending = df.amount.sum()
spending_series = pd.Series([avg_spending, transaction_count, sum_spending], index=["avg_spending", "transaction_count", 'sum_spending'], name=profile_id)
return spending_series
def get_event_typ_series(df, profile_id):
"""
INPUT :
df : DataFrame, clean merge transcript df
profile_id : profile id
RETURN :
event_typ_series : a Series of event_type value counts for given profile_id
"""
event_typ_series = (df.event + "_" + df.name).value_counts()
event_typ_series.name = profile_id
return event_typ_series
def get_attributes_series(df, profile_id):
"""
INPUT :
df : DataFrame, clean merge transcript df
profile_id : profile id
RETURN :
attributes_series : a Series of attributes for given profile_id
"""
event_typ_series = get_event_typ_series(df, profile_id)
response_time_series = get_response_time(df, profile_id)
spending_series = get_spending_series(df, profile_id)
attributes_series = pd.concat([event_typ_series, response_time_series, spending_series], axis=0)
return attributes_series
def generate_attributes(portfolio_df):
"""
INPUT :
portfolio_df : portfolio df
RETURN :
attributes: a list of attributes name
"""
portfolio_df = portfolio_preprocessing(portfolio_df)
events = ['offer received', 'offer viewed', 'offer completed']
portfolio_names = [ event +"_"+ name for event in events for name in portfolio_df.name.tolist() ]
response_time_attributes = [name +'_' +'response_time_avg' for name in portfolio_df.name.tolist() ]
attributes = portfolio_names + response_time_attributes + ["avg_spending", "transaction_count", "sum_spending"]
return attributes
def feature_fillna(profile_updated_df):
"""
This function is to fill missing values with zero (0) for selected features
INPUT: profile_updated_df with missing values
RETURN : profile_updated_df with no missing values
"""
profile_updated_df = profile_updated_df.copy()
cols_to_fillna = ['offer received_bogo_10_10_7',
'offer received_bogo_10_10_5',
'offer received_informational_0_0_4',
'offer received_bogo_5_5_7',
'offer received_discount_20_5_10',
'offer received_discount_7_3_7',
'offer received_discount_10_2_10',
'offer received_informational_0_0_3',
'offer received_bogo_5_5_5',
'offer received_discount_10_2_7',
'offer viewed_bogo_10_10_7',
'offer viewed_bogo_10_10_5',
'offer viewed_informational_0_0_4',
'offer viewed_bogo_5_5_7',
'offer viewed_discount_20_5_10',
'offer viewed_discount_7_3_7',
'offer viewed_discount_10_2_10',
'offer viewed_informational_0_0_3',
'offer viewed_bogo_5_5_5',
'offer viewed_discount_10_2_7',
'offer completed_bogo_10_10_7',
'offer completed_bogo_10_10_5',
'offer completed_informational_0_0_4',
'offer completed_bogo_5_5_7',
'offer completed_discount_20_5_10',
'offer completed_discount_7_3_7',
'offer completed_discount_10_2_10',
'offer completed_informational_0_0_3',
'offer completed_bogo_5_5_5',
'offer completed_discount_10_2_7',
'avg_spending',
'sum_spending'
]
col_null_frac = profile_updated_df.isnull().sum() / profile_updated_df.shape[0]
cols_to_drop = col_null_frac[col_null_frac ==1].index.tolist()
profile_updated_df[cols_to_fillna] = profile_updated_df[cols_to_fillna].fillna(0)
profile_updated_df = profile_updated_df.drop(cols_to_drop, axis=1)
return profile_updated_df
def add_invalid_feature(profile_updated_df, transcript_merge_df):
"""
INPUT :
profile_updated_df : updated profile df
transcript_merge_df : transcript_all_df as return by transcrip_preprocessing function
RETURN :
profile_updated_df : updated profile df with an 'invalid' column that counts how many invalid offer completed events each profile id made
"""
profile_updated_df = profile_updated_df.copy()
person_invalid = transcript_merge_df[transcript_merge_df.invalid == 1].person.value_counts()
# create new feature 'invalid': how many invalid transactions were made by the customer (transactions not influenced by an offer)
profile_updated_df['invalid'] = person_invalid
profile_updated_df['invalid'] = profile_updated_df['invalid'].fillna(0)
return profile_updated_df
def add_feature_rate_portfolio_type(profile_updated_df):
"""
Create features
- Total count of the offers received, viewed, completed for each type of portfolio (bogo, discount, and informational)
- Rates for each offering type (bogo/discount/informational) :
- rate_viewed = total viewed / total received
- rate_completed_viewed = total completed / total viewed
- rate_completed_received = total completed / total received
"""
profile_updated = profile_updated_df.copy()
for offer in ['bogo', 'discount', 'informational']:
received_cols = profile_updated.columns[(profile_updated.columns.str.contains('received_' + offer)) & \
(~profile_updated.columns.str.contains('rate' ))].tolist()
profile_updated[offer +'_received'] = profile_updated[received_cols].sum(axis=1).fillna(0)
viewed_cols = profile_updated.columns[(profile_updated.columns.str.contains('viewed_' + offer)) & \
(~profile_updated.columns.str.contains('rate'))].tolist()
profile_updated[offer +'_viewed'] = profile_updated[viewed_cols].sum(axis=1).fillna(0)
profile_updated['rate_viewed_' + offer] = (profile_updated[offer +'_viewed'] / profile_updated[offer +'_received']).fillna(0)
if offer != 'informational':
completed_cols = profile_updated.columns[(profile_updated.columns.str.contains('completed_' + offer)) & \
(~profile_updated.columns.str.contains('rate' ))].tolist()
profile_updated[offer +'_completed'] = profile_updated[completed_cols].sum(axis=1).fillna(0)
profile_updated['rate_completed_viewed_' + offer] = \
(profile_updated[offer +'_completed'] /profile_updated[offer +'_viewed']).fillna(0)
profile_updated['rate_completed_received_' + offer] = \
(profile_updated[offer +'_completed'] / profile_updated[offer +'_received']).fillna(0)
return profile_updated
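# --- Illustrative sketch (added for clarity; the numbers below are made up, not real data) ---
# A one-row frame with 4 bogo offers received, 3 viewed and 2 completed yields
# rate_viewed_bogo = 0.75, rate_completed_viewed_bogo ~= 0.667 and rate_completed_received_bogo = 0.5.
def example_rate_features():
    demo = pd.DataFrame({'offer received_bogo_5_5_7': [4],
                         'offer viewed_bogo_5_5_7': [3],
                         'offer completed_bogo_5_5_7': [2]})
    return add_feature_rate_portfolio_type(demo)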
def add_feature_rate_overall(profile_updated_df):
"""
Create Feature :
- Total count of received, viewed, completed
- Overall Rates :
- rate_viewed = total viewed / total received
- rate_completed_viewed = total completed / total viewed
- rate_completed_received = total completed / total received
"""
profile_updated = profile_updated_df.copy()
profile_updated['offer_received_total'] = profile_updated.bogo_received + profile_updated.discount_received + \
profile_updated.informational_received
profile_updated['offer_viewed_total'] = profile_updated.bogo_viewed + profile_updated.discount_viewed + \
profile_updated.informational_viewed
profile_updated['offer_completed_total'] = profile_updated.bogo_completed + profile_updated.discount_completed
profile_updated['rate_offer_viewed_overall'] = \
(profile_updated['offer_viewed_total'] / profile_updated['offer_received_total']).fillna(0)
profile_updated['rate_offer_completed_received_overall'] = \
(profile_updated['offer_completed_total'] / profile_updated['offer_received_total']).fillna(0)
profile_updated['rate_offer_completed_viewed_overall'] = \
(profile_updated['offer_completed_total'] / profile_updated['offer_viewed_total']).fillna(0)
return profile_updated
def add_feature_rate_portfolio_id(profile_updated_df, portfolio_df):
"""
Create Feature :
- Rates for each offering in portfolio :
- rate_viewed = total viewed / total received
- rate_completed_viewed = total completed / total viewed
- rate_completed_received = total completed / total received
"""
profile_updated = profile_updated_df.copy()
portfolio_updated = portfolio_preprocessing(portfolio_df)
for offer_name in portfolio_updated.name.tolist():
profile_updated['rate_offer_viewed_' + offer_name ] = \
(profile_updated['offer viewed_' + offer_name] / profile_updated['offer received_' + offer_name]).fillna(0)
if offer_name not in portfolio_updated[portfolio_updated.name.str.contains('informational')]['name'].tolist() :
profile_updated['rate_offer_completed_viewed_' + offer_name ] = \
(profile_updated['offer completed_' + offer_name] / profile_updated['offer viewed_' + offer_name]).fillna(0)
profile_updated['rate_offer_completed_received_' + offer_name ] = \
(profile_updated['offer completed_' + offer_name] / profile_updated['offer received_' + offer_name]).fillna(0)
return profile_updated
def add_feature_transaction_completed_ratio(profile_updated_df):
"""
Create the feature transaction count to offer completed ratio.
To avoid np.inf as a result of division, 0.1 is added to the denominator.
"""
profile_updated = profile_updated_df.copy()
profile_updated['transaction_completed_ratio'] = \
profile_updated.transaction_count / (profile_updated.offer_completed_total + 0.1)
return profile_updated
def feature_extraction(transcript_clean_df, transcript_all_df, profile_df, portfolio_df):
"""
INPUT :
transcript_clean_df : a clean transcript df
transcript_all_df : transcript all df as returned by the transcript_preprocessing function
profile_df : profile df
portfolio_df : portfolio df
RETURN :
profile_updated : profile updated df with 92 features
This function will first check whether the saved "profile_updated.csv" is available.
If not available, the next code block will be executed and the result saved.
"""
try:
profile_updated = load_file('data/profile_updated.csv')
print("The profile_updated.csv file is available at local folder.")
except:
attributes_df = pd.DataFrame(index=generate_attributes(portfolio_df))
for profile_id in tqdm(transcript_clean_df.person.unique()):
subset_df = transcript_clean_df[transcript_clean_df.person == profile_id]
subset_attributes_series = get_attributes_series(subset_df, profile_id)
attributes_df[profile_id] = subset_attributes_series
#parse dates became_member_on in profile_df
profile_df = profile_parse_dates(profile_df)
#df concatenation
profile_updated = pd.concat([profile_df.set_index('id'),attributes_df.T ], axis=1, sort=False)
# re-encode selected features as zero instead of NaN, since those customers did not receive any offer
profile_updated = feature_fillna(profile_updated)
# create new FEATURES
# add feature whether the customer made a valid or invalid transaction of offer completed
profile_updated = add_invalid_feature(profile_updated, transcript_all_df)
# add feature rate per portfolio type (bogo/discount/informational)
profile_updated = add_feature_rate_portfolio_type(profile_updated)
# add feature rate overall by event type (offer received, viewed, completed)
profile_updated = add_feature_rate_overall(profile_updated)
# add feature rate for individual portfolio id
profile_updated = add_feature_rate_portfolio_id(profile_updated, portfolio_df)
# add feature transaction to offer completed ratio
profile_updated = add_feature_transaction_completed_ratio(profile_updated)
#saving
profile_updated.to_csv('data/profile_updated.csv')
return profile_updated
"""
#######################
FEATURE PREPROCESSING
#######################
"""
def separate_profile(profile_updated_df):
"""
INPUT :
profile_updated_df : dataframe of profile
RETURN :
profile_updated_main : updated profile df for main profile, age < 100
profile_updated_sp : updated profile df for special profile, age >= 100
"""
# separate data with age < 100 and age >= 100, missing values on gender and income
#main profile
profile_updated_main = profile_updated_df[profile_updated_df.age < 100]
#special profile
profile_updated_sp = profile_updated_df[profile_updated_df.age >= 100]
profile_updated_sp = profile_updated_sp.drop(['gender', 'income', 'age'], axis=1)
return profile_updated_main, profile_updated_sp
def encode_member_day(profile_updated_df):
"""
INPUT :
profile_updated_df : profile df
RETURN :
profile_updated_df : updated profile df, with additional cols 'member_days_since' and 'member_year'
It calculates the delta in days between 31 Dec 2018 and the became_member_on date
"""
profile_updated_df = profile_updated_df.copy()
profile_updated_df['member_days_since'] = (datetime.datetime(2018,12,31) - profile_updated_df.became_member_on).dt.days
profile_updated_df['member_year'] = profile_updated_df.became_member_on.dt.year.astype(str)
profile_updated_df = profile_updated_df.drop('became_member_on', axis=1)
return profile_updated_df
def feature_preprocessing(profile_updated_df, transcript_all_df, portfolio_df):
"""
INPUT :
profile_updated_df : updated profile df
transcript_all_df : transcript df that contains both invalid and valid offers, as output by the transcript_preprocessing function
portfolio_df : portfolio df
RETURN :
profile_onehot_main : main profile df, one-hot encoded
profile_onehot_sp : sp profile df, one-hot encoded
"""
#drop features that have more than 50% missing values
col_null = profile_updated_df.isnull().sum()
col_null_frac = col_null / profile_updated_df.shape[0]
cols_to_drop = col_null_frac[col_null_frac > 0.5].index.tolist()
profile_updated_df = profile_updated_df.drop(cols_to_drop, axis=1)
# separate out rows that have age >= 100 years, with missing values on income and gender
profile_updated_main, profile_updated_sp = separate_profile(profile_updated_df)
# re-encode became_member_on to member_days_since (how many days since becoming a member, counted from 31 dec 2018)
profile_updated_clean = encode_member_day(profile_updated_main)
profile_updated_sp = encode_member_day(profile_updated_sp)
# one-hot the categorical features
profile_onehot_main = pd.get_dummies(profile_updated_clean)
profile_onehot_sp = pd.get_dummies(profile_updated_sp)
return profile_onehot_main, profile_onehot_sp
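# --- Illustrative usage sketch (added for clarity; not part of the original project code) ---
# It chains the extraction and preprocessing steps above; the input frames are assumed to
# come from transcript_preprocessing() and the raw profile/portfolio dataframes.
def example_build_features(transcript_valid_df, transcript_all_df, profile_df, portfolio_df):
    """Hypothetical call order: extract per-customer features, then one-hot encode them."""
    profile_updated = feature_extraction(transcript_valid_df, transcript_all_df,
                                         profile_df, portfolio_df)
    return feature_preprocessing(profile_updated, transcript_all_df, portfolio_df)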
"""
#######################
SAVE & LOAD MODEL
#######################
"""
def save(model, filename):
"""
This function is to save the sklearn object
INPUT :
model : sklearn object
filename : filepath to saved
RETURN : none
"""
import pickle
pickle.dump(model, open(filename,'wb'))
def load(filename):
"""
This function is to load the saved sklearn object
INPUT : filename : filepath
RETURN : loaded sklearn object
"""
import pickle
return pickle.load(open(filename, 'rb'))
"""
##################################
SPOT CHECK ML Supervised Algorithms
ref : https://machinelearningmastery.com/spot-check-machine-learning-algorithms-in-python/
##################################
"""
import warnings
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import HuberRegressor
from sklearn.linear_model import Lars
from sklearn.linear_model import LassoLars
from sklearn.linear_model import PassiveAggressiveRegressor
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import TheilSenRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor
# create a dict of standard models to evaluate {name:object}
def get_models(models=dict()):
# linear models
models['lr'] = LinearRegression()
alpha = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
for a in alpha:
models['lasso-'+str(a)] = Lasso(alpha=a)
for a in alpha:
models['ridge-'+str(a)] = Ridge(alpha=a)
for a1 in alpha:
for a2 in alpha:
name = 'en-' + str(a1) + '-' + str(a2)
models[name] = ElasticNet(a1, a2)
models['huber'] = HuberRegressor()
models['lars'] = Lars()
models['llars'] = LassoLars()
models['pa'] = PassiveAggressiveRegressor(max_iter=1000, tol=1e-3)
models['ransac'] = RANSACRegressor()
models['sgd'] = SGDRegressor(max_iter=1000, tol=1e-3)
models['theil'] = TheilSenRegressor()
# non-linear models
n_neighbors = range(1, 21)
for k in n_neighbors:
models['knn-'+str(k)] = KNeighborsRegressor(n_neighbors=k)
models['cart'] = DecisionTreeRegressor()
models['extra'] = ExtraTreeRegressor()
models['svml'] = SVR(kernel='linear')
models['svmp'] = SVR(kernel='poly')
c_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
for c in c_values:
models['svmr'+str(c)] = SVR(C=c)
# ensemble models
n_trees = 100
models['ada'] = AdaBoostRegressor(n_estimators=n_trees)
models['bag'] = BaggingRegressor(n_estimators=n_trees)
models['rf'] = RandomForestRegressor(n_estimators=n_trees)
models['et'] = ExtraTreesRegressor(n_estimators=n_trees)
models['gbm'] = GradientBoostingRegressor(n_estimators=n_trees)
print('Defined %d models' % len(models))
return models
# create a dict of standard models to evaluate {name:object} for MultiOutputRegressor
def get_models_multioutput(models=dict()):
# linear models
models['lr'] = MultiOutputRegressor(LinearRegression())
alpha = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
for a in alpha:
models['lasso-'+str(a)] = MultiOutputRegressor(Lasso(alpha=a))
for a in alpha:
models['ridge-'+str(a)] = MultiOutputRegressor(Ridge(alpha=a))
for a1 in alpha:
for a2 in alpha:
name = 'en-' + str(a1) + '-' + str(a2)
models[name] = MultiOutputRegressor(ElasticNet(a1, a2))
models['huber'] = MultiOutputRegressor(HuberRegressor())
models['lars'] = MultiOutputRegressor(Lars())
models['llars'] = MultiOutputRegressor(LassoLars())
models['pa'] = MultiOutputRegressor(PassiveAggressiveRegressor(max_iter=1000, tol=1e-3))
models['ransac'] = MultiOutputRegressor(RANSACRegressor())
models['sgd'] = MultiOutputRegressor(SGDRegressor(max_iter=1000, tol=1e-3))
models['theil'] = MultiOutputRegressor(TheilSenRegressor())
# non-linear models
n_neighbors = range(1, 21)
for k in n_neighbors:
models['knn-'+str(k)] = MultiOutputRegressor(KNeighborsRegressor(n_neighbors=k))
models['cart'] = MultiOutputRegressor(DecisionTreeRegressor())
models['extra'] = MultiOutputRegressor(ExtraTreeRegressor())
models['svml'] = MultiOutputRegressor(SVR(kernel='linear'))
models['svmp'] = MultiOutputRegressor(SVR(kernel='poly'))
c_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
for c in c_values:
models['svmr'+str(c)] = MultiOutputRegressor(SVR(C=c))
# ensemble models
n_trees = 100
models['ada'] = MultiOutputRegressor(AdaBoostRegressor(n_estimators=n_trees))
models['bag'] = MultiOutputRegressor(BaggingRegressor(n_estimators=n_trees))
models['rf'] = MultiOutputRegressor(RandomForestRegressor(n_estimators=n_trees))
models['et'] = MultiOutputRegressor(ExtraTreesRegressor(n_estimators=n_trees))
models['gbm'] = MultiOutputRegressor(GradientBoostingRegressor(n_estimators=n_trees))
print('Defined %d models' % len(models))
return models
# create a feature preparation pipeline for a model
def make_pipeline(model):
steps = list()
# standardization
steps.append(('standardize', StandardScaler()))
# normalization
steps.append(('normalize', MinMaxScaler()))
# the model
steps.append(('model', model))
# create pipeline
pipeline = Pipeline(steps=steps)
return pipeline
# evaluate a single model
def evaluate_model(X, y, model, folds, metric):
# create the pipeline
pipeline = make_pipeline(model)
# evaluate model
scores = cross_val_score(pipeline, X, y, scoring=metric, cv=folds, n_jobs=-1)
return scores
# evaluate a model and try to trap errors and hide warnings
def robust_evaluate_model(X, y, model, folds, metric):
scores = None
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
scores = evaluate_model(X, y, model, folds, metric)
except:
scores = None
return scores
# evaluate a dict of models {name:object}, returns {name:score}
def evaluate_models(X, y, models, folds=10, metric='accuracy'):
results = dict()
for name, model in models.items():
# evaluate the model
scores = robust_evaluate_model(X, y, model, folds, metric)
# show process
if scores is not None:
# store a result
results[name] = scores
mean_score, std_score = np.mean(scores), np.std(scores)
print('>%s: %.3f (+/-%.3f)' % (name, mean_score, std_score))
else:
print('>%s: error' % name)
return results
# print and plot the top n results
def summarize_results(results, maximize=True, top_n=10):
# check for no results
if len(results) == 0:
print('no results')
return
# determine how many results to summarize
n = min(top_n, len(results))
# create a list of (name, mean(scores)) tuples
mean_scores = [(k,np.mean(v)) for k,v in results.items()]
# sort tuples by mean score
mean_scores = sorted(mean_scores, key=lambda x: x[1])
# reverse for descending order (e.g. for accuracy)
if maximize:
mean_scores = list(reversed(mean_scores))
# retrieve the top n for summarization
names = [x[0] for x in mean_scores[:n]]
scores = [results[x[0]] for x in mean_scores[:n]]
# print the top n
print()
for i in range(n):
name = names[i]
mean_score, std_score = mean(results[name]), std(results[name])
print('Rank=%d, Name=%s, Score=%.3f (+/- %.3f)' % (i+1, name, mean_score, std_score))
# boxplot for the top n
pyplot.boxplot(scores, labels=names)
_, labels = pyplot.xticks()
pyplot.setp(labels, rotation=90)
#pyplot.savefig('spotcheck.png')
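# --- Illustrative usage sketch (added for clarity; not part of the referenced article) ---
# Spot-check a reduced model dictionary on a synthetic regression problem; the scorer name
# below is an assumption, any sklearn regression scorer can be passed as `metric`.
def example_spot_check():
    X, y = make_regression(n_samples=200, n_features=10, noise=0.1, random_state=1)
    models = {'lr': LinearRegression(),
              'knn-5': KNeighborsRegressor(n_neighbors=5),
              'cart': DecisionTreeRegressor()}
    results = evaluate_models(X, y, models, folds=5, metric='neg_mean_squared_error')
    summarize_results(results, maximize=True, top_n=3)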
```
|
{
"source": "jeffrobots/gatech_ml",
"score": 2
}
|
#### File: assignment4/environments/cognitive_radio.py
```python
import sys
import numpy as np
from gym import utils
from gym.envs.toy_text import discrete
from six import StringIO
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3
MAPS = {
"5x20": [
"BBOOOAOOCCCCCCCCOOOY",
"XBOOAAOOCCOOOOOOODDY",
"OBBBAAOOCCOOAOADDDDY",
"OOOBAAOOCCOAAAAADDDY",
"OOOOBOOOOOOOOOOODDDY",
],
"8x30": [
"BBOOOAOOCCCCCCCCOOOCCOCCOCCOCY",
"OBBOAAOOCCOOJJOOOAOOOAOOAOAOAY",
"OOBBAAOOCCOOJJOOOAOOOAOOAOAOAY",
"OOOBAAOOCCOOJJOOOAOOOAOOAOAOAY",
"XOOCBBOOOOOOBJOOOBBBOOODDODOOY",
"OOOCOBBBBOOOBJOOOBBOOOODODODDY",
"OOOCOOBBBOOOOJBOBBOOOOODDODOOY",
"OOOCOOOBOOOOOJBBOOOOOOODODODDY"
],
"10x40": [
"OOOOODOOODOOODOOODOOODOOODOOODOOODOOODOY",
"OOOOODOOODOOODOOODOOODOOODOOODOOODOOODOY",
"OOOOODOOODOOODOOODOOODOOODOOODOOODOOODOY",
"OJJJJJOOOOOOOJJJJJJJJOOOOOOOOJJJJJOOOOOY",
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY",
"OOAAAOOOAOOOAOOOAOOOAOOOAOOAOOOAOOOAOOOY",
"OBBOBBOBBOOOOBBBBOOOOOOOOOOOOOBBBOBBOOOY",
"XOOOOOOBBBBOOBBOOOOOOOCCCOOOOOCCCOOOCCCY",
"OCCCOOOOOOCCCCCCCOOOCCOOOCCCOOOOOCCCOOOY",
"OOOCCOOCCCOOOOOOOCCCOOOOOOOOOOOOOOOOOOOY"
]
}
class CognitiveRadio(discrete.DiscreteEnv):
"""
This is an extremely simplified problem that illustrates a case in which cognitive radio could be used to allow
an agent to communicate on a finite RF spectrum that has been divided into discrete channels (assuming no overlap and sufficient guard bands)
The map is formatted such that each column corresponds to a time slot in the discrete world (pretend it's 802.11 or whatever) and
each row corresponds to a discrete channel that the agent or any other transmitter in the scene could occupy.
The agent (a transmit/receive pair) must learn a series of channels that it can use in order to provide gapless communication.
The following restrictions are also placed on the agent:
- The agent can tune to any channel that is open BUT:
- Not needing to retune is a reward of (max num channels, so 5 in our example)
- Each distance tuned from there removes 1 from the reward.
# TODO maybe try to somehow favor gradual retuning? IE make this reward reduction exponential somehow?
- The agent MUST transmit (gaps are provided such that this is always possible) or else gameover. (enforced via large negative reward)
- The agent MUST move one and only one time slot forward in time (no time traveling)
- There is no reward associated with this, it is simply a physical limitation.
# MODES
- There are two modes: a simple TDMA structure where we assume that we can always find a slot and that penalties for retuning are linear, AND
a more complex mode where the scene isn't so well behaved.
The complex mode has the following rules:
- We must transmit data for at least some percentage of the total time (say 75% of all time) or else we get a large negative reward at the end
- If we tune to a channel that is adjacent to one that is filled, there is some probability that we won't succeed in transmitting on that slot
-
"""
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, desc=None, map_name="5x20", rewarding=True, step_reward=1, failure_reward=-100,
simple_tdma=False, collision_reward=-2, max_tune_dist=None):
if desc is None and map_name is None:
raise ValueError('Must provide either desc or map_name')
elif desc is None:
desc = MAPS[map_name]
self.desc = desc = np.asarray(desc, dtype='c')
self.nrow, self.ncol = nrow, ncol = desc.shape
self.reward_range = (0, nrow)
self.step_reward = step_reward
self.collision_reward = collision_reward
if max_tune_dist is None:
self.max_tune_dist = nrow
else:
self.max_tune_dist = max_tune_dist
self.simple_tdma = simple_tdma
self.failure_reward = failure_reward
self.other_transmits = 'ABCDEFGHIJ'
self.end_char = b'Y'
self.start_char = b'X'
self.jam_character = b'J'
self.adjacent_collision_prob = .15
self.out_of_band_tune_multiplier = 2
self.tune_distance_reward_ratio = .5
nA = nrow
nS = nrow * ncol
isd = np.array(desc == self.start_char).astype('float64').ravel()
isd /= isd.sum()
P = {s: {a: [] for a in range(nA)} for s in range(nS)}
def to_s(row, col):
return row * ncol + col
def tune(row, col, new_tune):
"""
Incrementer. Always advances column.
"""
row = new_tune
return (row, col+1)
for row in range(nrow):
for col in range(ncol):
s = to_s(row, col)
for a in range(self.nrow):
li = P[s][a]
letter = desc[row, col]
if letter in self.end_char or letter in self.jam_character:
# Game over
li.append((1.0, s, 0, True))
else:
# Tune and update reward
newrow, newcol = tune(row, col, a)
newstate = to_s(newrow, newcol)
new_letter = desc[newrow, newcol]
if self.simple_tdma:
rew, done = self.compute_reward_simple([newrow, newcol], [row, col], new_letter)
else:
rew, done = self.compute_reward_complex([newrow, newcol], [row, col], desc)
li.append((1.0, newstate, rew, done))
super(CognitiveRadio, self).__init__(nS, nA, P, isd)
def compute_reward_simple(self, new_pos, old_pos, new_letter):
"""
Compute reward from old position to new position.
Positions are in [row, col]
"""
new_row = new_pos[0]
old_row = old_pos[0]
if new_letter in b'OY':
rew = self.max_tune_dist - np.abs(new_row - old_row)
else:
rew = self.failure_reward
done = new_letter.astype(str) in self.other_transmits or new_letter in self.end_char
return rew, done
def compute_reward_complex(self, new_pos, old_pos, spectrum):
"""
Compute reward from old position to new position.
Positions are in [row, col]
"""
new_letter = spectrum[new_pos[0], new_pos[1]]
new_row = new_pos[0]
old_row = old_pos[0]
total_reward = self.step_reward
# If the tile is open or the end, compute an inverse linear reward with tune distance.
if new_letter.astype(str) in "OY":
# If the distance is greater than the tune dist (tuning outside of max dist)
# Then we want to give a bigger negative reward that makes this expensive.
potential_new_reward = (self.max_tune_dist - np.abs(new_row - old_row)) * self.tune_distance_reward_ratio
if potential_new_reward < 0:
potential_new_reward *= self.out_of_band_tune_multiplier
# If the reward was open but has an occupied channel adjacent to it, compute prob. of collision.
row_above = np.clip(new_pos[0] - 1, 0, self.nrow - 1)
row_below = np.clip(new_pos[0] + 1, 0, self.nrow - 1)
above = spectrum[row_above, new_pos[1]].astype(str)
below = spectrum[row_below, new_pos[1]].astype(str)
num_adjacent = 0
if above in self.other_transmits:
num_adjacent += 1
if below in self.other_transmits:
num_adjacent += 1
if num_adjacent:
p_collide = num_adjacent * self.adjacent_collision_prob
# With some probability, perform a collision.
collision = np.random.choice([True, False], 1,
p=[p_collide, 1-p_collide])
if collision:
potential_new_reward += self.collision_reward * num_adjacent
total_reward += potential_new_reward
elif new_letter == self.jam_character:
total_reward += 0
else:
total_reward += self.collision_reward
done = str(new_letter) == str(self.end_char) or new_letter == self.jam_character
return total_reward, done
def render(self, mode='human'):
outfile = StringIO() if mode == 'ansi' else sys.stdout
row, col = self.s // self.ncol, self.s % self.ncol
desc = self.desc.tolist()
desc = [[c.decode('utf-8') for c in line] for line in desc]
desc[row][col] = utils.colorize(desc[row][col], "red", highlight=True)
if self.lastaction is not None:
outfile.write(" ({})\n".format(["Left", "Down", "Right", "Up"][self.lastaction]))
else:
outfile.write("\n")
outfile.write("\n".join(''.join(line) for line in desc) + "\n")
if mode != 'human':
return outfile
def colors(self):
return {
b'A': 'purple',
b'B': 'skyblue',
b'C': 'yellow',
b'D': 'brown',
b'E': 'orange',
b'F': 'grey',
b'J': 'red',
b'O': 'white',
b'X': 'green',
b'Y': 'green',
}
def directions(self):
return {
-5: '5⬆',
-4: '4⬆',
-3: '3⬆',
-2: '2⬆',
-1: '1⬆',
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: '10'
}
def new_instance(self):
return CognitiveRadio(desc=self.desc,step_reward=self.step_reward, simple_tdma=self.simple_tdma,
failure_reward=self.failure_reward, collision_reward=self.collision_reward,
max_tune_dist=self.max_tune_dist)
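# --- Illustrative usage sketch (added for clarity; not part of the original assignment code) ---
# Rolls out random channel choices on the small map; it assumes the classic gym
# DiscreteEnv API where reset() returns a state and step() returns (state, reward, done, info).
if __name__ == '__main__':
    env = CognitiveRadio(map_name="5x20", simple_tdma=True)
    state = env.reset()
    total_reward, done = 0, False
    while not done:
        action = env.action_space.sample()  # pick a channel (row) to tune to
        state, reward, done, _ = env.step(action)
        total_reward += reward
    print('Episode finished with total reward', total_reward)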
```
|
{
"source": "JeffroMF/mars",
"score": 2
}
|
#### File: scheduling/worker/workerslot.py
```python
import asyncio
import time
from collections import namedtuple
from typing import Optional, Tuple
import psutil
from .... import oscar as mo
from ....oscar.backends.allocate_strategy import IdleLabel
DispatchDumpType = namedtuple('DispatchDumpType', 'free_slots')
class WorkerSlotManagerActor(mo.Actor):
def __init__(self):
self._cluster_api = None
self._global_slots_ref = None
async def __post_create__(self):
from ...cluster.api import ClusterAPI
from ..supervisor import GlobalSlotManagerActor
self._cluster_api = await ClusterAPI.create(self.address)
[self._global_slots_ref] = await self._cluster_api.get_supervisor_refs([
GlobalSlotManagerActor.default_uid()])
band_to_slots = await self._cluster_api.get_bands()
for band, n_slot in band_to_slots.items():
await mo.create_actor(
BandSlotManagerActor, band[1], n_slot, self._global_slots_ref,
uid=BandSlotManagerActor.gen_uid(band[1]),
address=self.address)
class BandSlotManagerActor(mo.Actor):
@classmethod
def gen_uid(cls, band_name: str):
return f'{band_name}_band_slot_manager'
def __init__(self, band_name: str, n_slots: int,
global_slots_ref: Optional[mo.ActorRef] = None):
super().__init__()
self._band_name = band_name
self._global_slots_ref = global_slots_ref
self._n_slots = n_slots
self._semaphore = asyncio.Semaphore(0)
self._slot_control_refs = dict()
self._free_slots = set()
self._slot_kill_events = dict()
self._slot_to_session_stid = dict()
self._slot_to_usage = dict()
self._last_report_time = time.time()
async def __post_create__(self):
strategy = IdleLabel(self._band_name, 'worker_slot_control')
for slot_id in range(self._n_slots):
self._slot_control_refs[slot_id] = await mo.create_actor(
BandSlotControlActor,
self.ref(), self._band_name, slot_id,
uid=BandSlotControlActor.gen_uid(self._band_name, slot_id),
address=self.address,
allocate_strategy=strategy)
async def acquire_free_slot(self, session_stid: Tuple[str, str]):
yield self._semaphore.acquire()
slot_id = self._free_slots.pop()
self._slot_to_session_stid[slot_id] = session_stid
raise mo.Return(slot_id)
def release_free_slot(self, slot_id):
if slot_id in self._slot_kill_events:
event = self._slot_kill_events.pop(slot_id)
event.set()
self._slot_to_session_stid.pop(slot_id, None)
self._free_slots.add(slot_id)
self._semaphore.release()
async def kill_slot(self, slot_id):
event = self._slot_kill_events[slot_id] = asyncio.Event()
await mo.kill_actor(self._slot_control_refs[slot_id])
return event.wait()
async def set_slot_usage(self, slot_id: int, usage: float):
self._slot_to_usage[slot_id] = usage
if self._global_slots_ref is None:
return
if time.time() - self._last_report_time >= 1.0:
self._last_report_time = time.time()
delays = []
for slot_id, (session_id, subtask_id) in self._slot_to_session_stid.items():
if slot_id not in self._slot_to_usage:
continue
delays.append(self._global_slots_ref.update_subtask_slots.delay(
self._band_name, session_id, subtask_id,
max(1.0, self._slot_to_usage[slot_id])))
return self._global_slots_ref.update_subtask_slots.batch(*delays)
def dump_data(self):
"""
Dump the set of currently free slot ids of this band
"""
return DispatchDumpType(self._free_slots)
class BandSlotControlActor(mo.Actor):
@classmethod
def gen_uid(cls, band_name: str, slot_id: int):
return f'{band_name}_{slot_id}_band_slot_control'
def __init__(self, manager_ref, band_name, slot_id: int):
self._manager_ref = manager_ref
self._band_name = band_name
self._slot_id = slot_id
self._report_task = None
async def __post_create__(self):
await self._manager_ref.release_free_slot.tell(self._slot_id)
async def report_usage():
proc = psutil.Process()
proc.cpu_percent()
while True:
await asyncio.sleep(1)
await self._manager_ref.set_slot_usage.tell(
self._slot_id, proc.cpu_percent())
self._report_task = asyncio.create_task(report_usage())
async def __pre_destroy__(self):
if self._report_task: # pragma: no branch
self._report_task.cancel()
```
|
{
"source": "JeffRous/ispc",
"score": 2
}
|
#### File: JeffRous/ispc/perf.py
```python
def print_file(line):
if options.output != "":
output = open(options.output, 'w')
output.writelines(line)
output.close()
def execute_test(commands):
r = 0
common.remove_if_exists(perf_temp+"_test")
common.remove_if_exists(perf_temp+"_ref")
for k in range(int(options.number)):
r = r + os.system(commands[0])
if options.ref:
r = r + os.system(commands[1])
return r
#gathers all test results and builds an item test from the answer structure
def run_test(commands, c1, c2, test, test_ref, b_serial):
if execute_test(commands) != 0:
error("Execution fails of test %s\n" % test[0], 0)
global exit_code
exit_code = 1
return
print_debug("TEST COMPILER:\n", s, perf_log)
analyse_test(c1, c2, test, b_serial, perf_temp+"_test")
if options.ref:
print_debug("REFERENCE COMPILER:\n", s, perf_log)
analyse_test(c1, c2, test_ref, b_serial, perf_temp+"_ref")
def analyse_test(c1, c2, test, b_serial, perf_temp_n):
tasks = [] #list of results with tasks, it will be test[2]
ispc = [] #list of results without tasks, it will be test[1]
absolute_tasks = [] #list of absolute results with tasks, it will be test[4]
absolute_ispc = [] #list of absolute results without tasks, it will be test[3]
serial = [] #list serial times, it will be test[5]
j = 1
for line in open(perf_temp_n): # we take test output
if "speedup" in line: # we are interested only in lines with speedup
if j == c1: # we are interested only in lines with c1 numbers
line = line.expandtabs(0)
line = line.replace("("," ")
line = line.split(",")
for i in range(len(line)):
subline = line[i].split(" ")
number = float(subline[1][:-1])
if "speedup from ISPC + tasks" in line[i]:
tasks.append(number)
else:
ispc.append(number)
c1 = c1 + c2
j+=1
if "million cycles" in line:
if j == c1:
if line[0] == '@':
print_debug(line, True, perf_log)
else:
line = line.replace("]","[")
line = line.split("[")
number = float(line[3])
if "tasks" in line[1]:
absolute_tasks.append(number)
else:
if "ispc" in line[1]:
absolute_ispc.append(number)
if "serial" in line[1]:
serial.append(number)
if len(ispc) != 0:
if len(tasks) != 0:
print_debug("ISPC speedup / ISPC + tasks speedup / ISPC time / ISPC + tasks time / serial time\n", s, perf_log)
for i in range(0,len(serial)):
print_debug("%10s /\t%10s\t /%9s / %10s\t /%10s\n" %
(ispc[i], tasks[i], absolute_ispc[i], absolute_tasks[i], serial[i]), s, perf_log)
else:
print_debug("ISPC speedup / ISPC time / serial time\n", s, perf_log)
for i in range(0,len(serial)):
print_debug("%10s /%9s /%10s\n" % (ispc[i], absolute_ispc[i], serial[i]), s, perf_log)
else:
if len(tasks) != 0:
print_debug("ISPC + tasks speedup / ISPC + tasks time / serial time\n", s, perf_log)
for i in range(0,len(serial)):
print_debug("%10s\t / %10s\t /%10s\n" % (tasks[i], absolute_tasks[i], serial[i]), s, perf_log)
test[1] = test[1] + ispc
test[2] = test[2] + tasks
test[3] = test[3] + absolute_ispc
test[4] = test[4] + absolute_tasks
if b_serial == True:
#if we concatenate outputs we should use only the first serial answer.
test[5] = test[5] + serial
def cpu_get():
p = open("/proc/stat", 'r')
cpu = p.readline()
p.close()
cpu = cpu.split(" ")
cpu_usage = (int(cpu[2]) + int(cpu[3]) + int(cpu[4]))
cpu_all = cpu_usage + int(cpu[5])
return [cpu_usage, cpu_all]
#returns cpu_usage
def cpu_check():
if is_windows == False:
if is_mac == False:
cpu1 = cpu_get()
time.sleep(1)
cpu2 = cpu_get()
cpu_percent = (float(cpu1[0] - cpu2[0])/float(cpu1[1] - cpu2[1]))*100
else:
os.system("sysctl -n vm.loadavg > cpu_temp")
c = open("cpu_temp", 'r')
c_line = c.readline()
c.close()
os.remove("cpu_temp")
R = c_line.split(' ')
cpu_percent = float(R[1]) * 3
else:
os.system("wmic cpu get loadpercentage /value > cpu_temp")
c = open("cpu_temp", 'r')
c_lines = c.readlines()
c.close()
os.remove("cpu_temp")
t = "0"
for i in c_lines[2]:
if i.isdigit():
t = t + i
cpu_percent = int(t)
return cpu_percent
#returns geomean of list
def geomean(par):
temp = 1
l = len(par)
for i in range(l):
temp = temp * par[i]
if l != 0:
temp = temp ** (1.0/l)
else:
temp = 0
return round(temp, 2)
#takes an answer struct and print it.
#answer struct: list answer contains lists test
#test[0] - name of test
#test[1] - list of results without tasks
#test[2] - list of results with tasks
#test[3] - list of absolute results without tasks
#test[4] - list of absolute results with tasks
#test[5] - list of absolute time without ISPC (serial)
#test[1..4] may be empty
def print_answer(answer, target_number):
filelist = []
print_debug("--------------------------------------------------------------------------\n", s, perf_log)
print_debug("test name:\t ISPC speedup: ISPC + tasks speedup: | " +
" ISPC time: ISPC + tasks time: serial:\n", s, perf_log)
if target_number > 1:
if options.output == "":
options.output = "targets.csv"
filelist.append("test name,ISPC speedup" + "," * target_number + "ISPC + tasks speedup\n")
filelist.append("," + options.perf_target + "," + options.perf_target + "\n")
else:
filelist.append("test name,ISPC speedup,diff," +
"ISPC + tasks speedup,diff,ISPC time,diff,ISPC + tasks time,diff,serial,diff\n")
max_t = [0,0,0,0,0]
diff_t = [0,0,0,0,0]
geomean_t = []
list_of_max = []
for i1 in range(target_number):
geomean_t.append([0,0,0,0,0])
list_of_max.append([[],[],[],[],[]])
list_of_compare = [[],[],[],[],[],[]]
target_k = 0
temp_str_1 = ""
temp_str_2 = ""
for i in range(len(answer)):
list_of_compare[0].append(answer[i][0])
for t in range(1,6):
if len(answer[i][t]) == 0:
max_t[t-1] = "n/a"
diff_t[t-1] = "n/a"
list_of_compare[t].append(0);
else:
if t < 3:
mm = max(answer[i][t])
else:
mm = min(answer[i][t])
list_of_compare[t].append(mm)
max_t[t-1] = '%.2f' % mm
list_of_max[i % target_number][t-1].append(mm)
diff_t[t-1] = '%.2f' % (max(answer[i][t]) - min(answer[i][t]))
print_debug("%s:\n" % answer[i][0], s, perf_log)
print_debug("\t\tmax:\t%5s\t\t%10s\t|min:%10s\t%10s\t%10s\n" %
(max_t[0], max_t[1], max_t[2], max_t[3], max_t[4]), s, perf_log)
print_debug("\t\tdiff:\t%5s\t\t%10s\t|%14s\t%10s\t%10s\n" %
(diff_t[0], diff_t[1], diff_t[2], diff_t[3], diff_t[4]), s, perf_log)
for t in range(0,5):
if max_t[t] == "n/a":
max_t[t] = ""
if diff_t[t] == "n/a":
diff_t[t] = ""
if target_number > 1:
if target_k == 0:
temp_str_1 = answer[i][0] + ","
temp_str_2 = ""
temp_str_1 += max_t[0] + ","
temp_str_2 += max_t[1] + ","
target_k = target_k + 1
if target_k == target_number:
filelist.append(temp_str_1 + temp_str_2[:-1] + "\n")
target_k = 0
else:
filelist.append(answer[i][0] + "," +
max_t[0] + "," + diff_t[0] + "," + max_t[1] + "," + diff_t[1] + "," +
max_t[2] + "," + diff_t[2] + "," + max_t[3] + "," + diff_t[3] + "," +
max_t[4] + "," + diff_t[4] + "\n")
for i in range(0,5):
for i1 in range(target_number):
geomean_t[i1][i] = geomean(list_of_max[i1][i])
print_debug("---------------------------------------------------------------------------------\n", s, perf_log)
print_debug("Geomean:\t\t%5s\t\t%10s\t|%14s\t%10s\t%10s\n" %
(geomean_t[0][0], geomean_t[0][1], geomean_t[0][2], geomean_t[0][3], geomean_t[0][4]), s, perf_log)
if target_number > 1:
temp_str_1 = "Geomean,"
temp_str_2 = ""
for i in range(target_number):
temp_str_1 += str(geomean_t[i][0]) + ","
temp_str_2 += str(geomean_t[i][1]) + ","
filelist.append(temp_str_1 + temp_str_2[:-1] + "\n")
else:
filelist.append("Geomean," + str(geomean_t[0][0]) + ",," + str(geomean_t[0][1])
+ ",," + str(geomean_t[0][2]) + ",," + str(geomean_t[0][3]) + ",," + str(geomean_t[0][4]) + "\n")
print_file(filelist)
return list_of_compare
def compare(A, B):
print_debug("\n\n_____________________PERFORMANCE REPORT____________________________\n", False, "")
print_debug("test name: ISPC time: ISPC time ref: %:\n", False, "")
for i in range(0,len(A[0])):
if B[3][i] == 0:
p1 = 0
else:
p1 = 100 - 100 * A[3][i]/B[3][i]
print_debug("%21s: %10.2f %10.2f %10.2f" % (A[0][i], A[3][i], B[3][i], abs(p1)), False, "")
if p1 < -1:
print_debug(" <+", False, "")
if p1 > 1:
print_debug(" <-", False, "")
print_debug("\n", False, "")
print_debug("\n", False, "")
print_debug("test name: TASKS time: TASKS time ref: %:\n", False, "")
for i in range(0,len(A[0])):
if B[4][i] == 0:
p2 = 0
else:
p2 = 100 - 100 * A[4][i]/B[4][i]
print_debug("%21s: %10.2f %10.2f %10.2f" % (A[0][i], A[4][i], B[4][i], abs(p2)), False, "")
if p2 < -1:
print_debug(" <+", False, "")
if p2 > 1:
print_debug(" <-", False, "")
print_debug("\n", False, "")
if "performance.log" in options.in_file:
print_debug("\n\n_________________Watch performance.log for details________________\n", False, "")
else:
print_debug("\n\n__________________________________________________________________\n", False, "")
def perf(options1, args):
global options
options = options1
global s
s = options.silent
# save current OS
global is_windows
is_windows = (platform.system() == 'Windows' or
'CYGWIN_NT' in platform.system())
global is_mac
is_mac = (platform.system() == 'Darwin')
# save current path
pwd = os.getcwd()
pwd = pwd + os.sep
pwd1 = pwd
if is_windows:
pwd1 = "..\\..\\"
if options.perf_target != "":
test_only_r = " sse2-i32x4 sse2-i32x8 sse4-i32x4 sse4-i32x8 sse4-i16x8 \
sse4-i8x16 avx1-i32x4 avx1-i32x8 avx1-i32x16 avx1-i64x4 avx1.1-i32x8 \
avx1.1-i32x16 avx1.1-i64x4 avx2-i32x8 avx2-i32x16 avx2-i64x4 \
avx512knl-i32x16 avx512skx-i32x16 "
test_only = options.perf_target.split(",")
for iterator in test_only:
if not (" " + iterator + " " in test_only_r):
error("unknow option for target: " + iterator, 1)
# check if cpu usage is low now
cpu_percent = cpu_check()
if cpu_percent > 20:
error("CPU Usage is very high.\nClose other applications.\n", 2)
# prepare build.log, perf_temp and perf.log files
global perf_log
if options.in_file:
perf_log = pwd + options.in_file
common.remove_if_exists(perf_log)
else:
perf_log = ""
global build_log
build_log = pwd + os.sep + "logs" + os.sep + "perf_build.log"
common.remove_if_exists(build_log)
if os.path.exists(pwd + os.sep + "logs") == False:
os.makedirs(pwd + os.sep + "logs")
global perf_temp
perf_temp = pwd + "perf_temp"
global ispc_test
global ispc_ref
global ref_compiler
global refc_compiler
# check that required compilers exist
PATH_dir = os.getenv("PATH").split(os.pathsep)
ispc_test_exists = False
ispc_ref_exists = False
ref_compiler_exists = False
if is_windows == False:
ispc_test = "ispc"
ref_compiler = "clang++"
refc_compiler = "clang"
if options.compiler != "":
if options.compiler == "clang" or options.compiler == "clang++":
ref_compiler = "clang++"
refc_compiler = "clang"
if options.compiler == "icc" or options.compiler == "icpc":
ref_compiler = "icpc"
refc_compiler = "icc"
if options.compiler == "gcc" or options.compiler == "g++":
ref_compiler = "g++"
refc_compiler = "gcc"
else:
ispc_test = "ispc.exe"
ref_compiler = "cl.exe"
ispc_ref = options.ref
if options.ref != "":
options.ref = True
if os.environ.get("ISPC_HOME") != None:
if os.path.exists(os.environ["ISPC_HOME"] + os.sep + ispc_test):
ispc_test_exists = True
ispc_test = os.environ["ISPC_HOME"] + os.sep + ispc_test
for counter in PATH_dir:
if ispc_test_exists == False:
if os.path.exists(counter + os.sep + ispc_test):
ispc_test_exists = True
ispc_test = counter + os.sep + ispc_test
if os.path.exists(counter + os.sep + ref_compiler):
ref_compiler_exists = True
if ispc_ref != "":
if os.path.exists(counter + os.sep + ispc_ref):
ispc_ref_exists = True
ispc_ref = counter + os.sep + ispc_ref
if not ispc_test_exists:
error("ISPC compiler not found.\nAdd the path to the ispc compiler to your PATH variable or set the ISPC_HOME variable.\n", 1)
if not ref_compiler_exists:
error("C/C++ compiler %s not found.\nAdd the path to the %s compiler to your PATH variable.\n" % (ref_compiler, ref_compiler), 1)
if options.ref:
if not ispc_ref_exists:
error("ISPC reference compiler not found.\nAdd the path to the ispc reference compiler to your PATH variable.\n", 1)
# checks that config file exists
path_config = os.path.normpath(options.config)
if os.path.exists(path_config) == False:
error("config file not found: %s.\nSet path to your config file in --config.\n" % options.config, 1)
sys.exit()
# read lines from config file except comments
f = open(path_config, 'r')
f_lines = f.readlines()
f.close()
lines =[]
for i in range(len(f_lines)):
if f_lines[i][0] != "%":
lines.append(f_lines[i])
length = len(lines)
# end of preparations
print_debug("Okey go go go!\n\n", s, perf_log)
# report command line
if __name__ == "__main__":
print_debug("Command line: %s\n" % " ".join(map(str, sys.argv)), s, perf_log)
# report used ispc
print_debug("Testing ispc: " + ispc_test + "\n", s, perf_log)
#print compilers versions
common.print_version(ispc_test, ispc_ref, ref_compiler, False, perf_log, is_windows)
# begin
i = 0
answer = []
answer_ref = []
# loop for all tests
perf_targets = [""]
target_number = 1
target_str_temp = ""
if options.perf_target != "":
perf_targets = options.perf_target.split(',')
target_str_temp = " -DISPC_IA_TARGETS="
target_number = len(perf_targets)
# Generate build targets for tests
if options.generator:
generator = options.generator
else:
if is_windows == True:
generator = "Visual Studio 14 Win64"
else:
generator = "Unix Makefiles"
examples_folder_ref = "examples_ref"
examples_folder_test = "examples_test"
install_prefix = "install"
cmake_command = "cmake -G " + "\"" + generator + "\"" + " -DCMAKE_INSTALL_PREFIX=" + install_prefix + " " + pwd + "examples"
if is_windows == False:
cmake_command += " -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang"
for target_i in range(target_number):
cur_target = perf_targets[target_i]
target_str = target_str_temp + cur_target
if options.ref:
build_folder = examples_folder_ref + os.sep + cur_target
if os.path.exists(build_folder):
shutil.rmtree(build_folder)
os.makedirs(build_folder)
cmake_command_ref = "cd " + build_folder + " && " + cmake_command + \
" -DISPC_EXECUTABLE=" + ispc_ref + target_str + " >> " + build_log
if os.system(cmake_command_ref) != 0:
error("Cmake command failed with reference compiler %s\n" % ispc_ref, 1)
# Build and install tests for reference compiler
if is_windows == False:
bu_command_ref = "cd " + build_folder + " && make install >> "+ build_log+" 2>> "+ build_log
else:
bu_command_ref = "msbuild " + build_folder + os.sep + "INSTALL.vcxproj /V:m /p:Configuration=Release /t:rebuild >> " + build_log
if os.system(bu_command_ref) != 0:
error("Build failed with reference compiler %s\n" % ispc_ref, 1)
build_folder = examples_folder_test + os.sep + cur_target
if os.path.exists(build_folder):
shutil.rmtree(build_folder)
os.makedirs(build_folder)
cmake_command_test = "cd " + build_folder + " && " + cmake_command + \
" -DISPC_EXECUTABLE=" + ispc_test + target_str + " >> " + build_log
if os.system(cmake_command_test) != 0:
error("Cmake command failed with test compiler %s\n" % ispc_test, 1)
# Build and install tests for test compiler
if is_windows == False:
bu_command_test = "cd " + build_folder + " && make install >> "+ build_log+" 2>> "+ build_log
else:
bu_command_test = "msbuild " + build_folder + os.sep + "INSTALL.vcxproj /V:m /p:Configuration=Release /t:rebuild >> " + build_log
if os.system(bu_command_test) != 0:
error("Build failed with test compiler %s\n" % ispc_test, 1)
# Run tests
while i < length-2:
# we read name of test
print_debug("%s" % lines[i], s, perf_log)
# read location of test
folder = lines[i+1]
folder = folder[:-1]
example = folder
# read parameters of test
command = lines[i+2]
command = command[:-1]
temp = 0
# execute test for each target
for target_i in range(target_number):
test = [lines[i][:-1],[],[],[],[],[]]
test_ref = [lines[i][:-1],[],[],[],[],[]]
cur_target = perf_targets[target_i]
folder = os.path.normpath(options.path + os.sep + examples_folder_test + os.sep + cur_target + \
os.sep + install_prefix + os.sep + "examples" + os.sep + example)
folder_ref = os.path.normpath(options.path + os.sep + examples_folder_ref + os.sep + cur_target + \
os.sep + install_prefix + os.sep + "examples" + os.sep + example)
# check that test exists
if os.path.exists(folder) == False:
error("Can't find test %s. Your path is: \"%s\".\nChange current location to ISPC_HOME or set path to ISPC_HOME in --path.\n" %
(lines[i][:-1], options.path), 1)
if is_windows == False:
ex_command_ref = "cd "+ folder_ref + " && ./" + example + " " + command + " >> " + perf_temp + "_ref"
ex_command = "cd "+ folder + " && ./" + example + " " + command + " >> " + perf_temp + "_test"
else:
ex_command_ref = "cd "+ folder_ref + " && " + example + ".exe " + command + " >> " + perf_temp + "_ref"
ex_command = "cd "+ folder + " && " + example + ".exe " + command + " >> " + perf_temp + "_test"
commands = [ex_command, ex_command_ref]
# parsing config parameters
next_line = lines[i+3]
if next_line[0] == "!": # we should take only one part of test output
R = next_line.split(' ')
c1 = int(R[1]) #c1 is a number of string which we want to use in test output
c2 = int(R[2]) #c2 is total number of strings in test output
temp = 1
else:
c1 = 1
c2 = 1
next_line = lines[i+3]
if next_line[0] == "^":
temp = 1
if next_line[0] == "^" and target_number == 1: #we should concatenate result of this test with previous one
run_test(commands, c1, c2, answer[len(answer)-1], answer_ref[len(answer)-1], False)
else: #we run this test and append its result to the answer structure
run_test(commands, c1, c2, test, test_ref, True)
answer.append(test)
answer_ref.append(test_ref)
i = i + temp
# preparing next loop iteration
i+=4
# delete temp file
common.remove_if_exists(perf_temp+"_test")
common.remove_if_exists(perf_temp+"_ref")
#print collected answer
if target_number > 1:
s = True
print_debug("\n\nTEST COMPILER:\n", s, perf_log)
A = print_answer(answer, target_number)
if options.ref != "":
print_debug("\n\nREFERENCE COMPILER:\n", s, perf_log)
B = print_answer(answer_ref, target_number)
# print perf report
compare(A,B)
###Main###
from optparse import OptionParser
import sys
import os
import operator
import time
import glob
import string
import platform
import shutil
# our functions
import common
print_debug = common.print_debug
error = common.error
exit_code = 0
if __name__ == "__main__":
# parsing options
parser = OptionParser()
parser.add_option('-n', '--number', dest='number',
help='number of repeats', default="3")
parser.add_option('-c', '--config', dest='config',
help='config file of tests', default="./perf.ini")
parser.add_option('-p', '--path', dest='path',
help='path to ispc root', default=".")
parser.add_option('-s', '--silent', dest='silent',
help='silent mode, only table output', default=False, action="store_true")
parser.add_option('-o', '--output', dest='output',
help='output file for script reading', default="")
parser.add_option('--compiler', dest='compiler',
help='C/C++ compiler', default="")
parser.add_option('-r', '--ref', dest='ref',
help='set reference compiler for compare', default="")
parser.add_option('-f', '--file', dest='in_file',
help='file to save perf output', default="")
parser.add_option('-t', '--target', dest='perf_target',
help='set ispc target for building benchmarks (both test and ref)', default="")
parser.add_option('-g', '--generator', dest='generator',
help='cmake generator')
(options, args) = parser.parse_args()
perf(options, args)
exit(exit_code)
```
|
{
"source": "Jeffrowetull/Twitoff",
"score": 3
}
|
#### File: Twitoff/twitoff/predict.py
```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import BASILICA
def predict_user(user1_name, user2_name, tweet_text):
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
user1_embeddings = np.array([tweet.embedding for tweet in user1.tweets])
user2_embeddings = np.array([tweet.embedding for tweet in user2.tweets])
embeddings = np.vstack([user1_embeddings, user2_embeddings])
labels = np.concatenate([np.ones(len(user1.tweets)),
np.zeros(len(user2.tweets))])
log_reg = LogisticRegression().fit(embeddings, labels)
tweet_embedding = BASILICA.embed_sentence(tweet_text, model='twitter')
return log_reg.predict(np.array(tweet_embedding).reshape(1, -1))
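# --- Illustrative usage sketch (added for clarity; not part of the original app code) ---
# The user names below are placeholders; both users must already be stored with tweet
# embeddings, and an active Flask app context plus a Basilica connection are assumed.
def example_compare(user1_name='user_one', user2_name='user_two',
                    tweet_text='example tweet text'):
    """Returns 1.0 if the hypothetical tweet reads more like user1, else 0.0."""
    return predict_user(user1_name, user2_name, tweet_text)[0]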
```
|
{
"source": "jeffrutledge/dynamic_desktop_background",
"score": 3
}
|
#### File: jeffrutledge/dynamic_desktop_background/weather_requester.py
```python
from abc import ABC, abstractmethod
import datetime
import json
import requests
class CachedWeatherRequester(ABC):
@staticmethod
@abstractmethod
def _weather_to_desktop_bg_image_name():
pass
def __init__(self, api_key, cache_path, stale_time):
self._api_key = api_key
self._cache_path = cache_path
self.stale_time = stale_time
@abstractmethod
def cache_age(self):
pass
def get_cached_weather(self):
with open(self._cache_path, 'r') as cache:
return json.load(cache)
def set_cached_weather(self, weather):
with open(self._cache_path, 'w') as outfile:
json.dump(weather, outfile)
@abstractmethod
def _request_weather(self):
pass
def get_weather(self):
if self.cache_age() < self.stale_time:
return self.get_cached_weather()
else:
weather = self._request_weather()
self.set_cached_weather(weather)
return weather
def get_current_desktop_bg_image_name(self):
weather = self.get_weather()
return self._weather_to_desktop_bg_image_name(weather)
class WundergroundWeatherRequester(CachedWeatherRequester):
DESKTOP_BG_NAME_FROM_ICON = {
'chanceflurries': 'snow.png',
'chancerain': 'rain.png',
'chancesleet': 'snow.png',
'chancesnow': 'snow.png',
'chancetstorms': 'rain.png',
'clear': 'clear.png',
'cloudy': 'full_clouds.png',
'flurries': 'snow.png',
'fog': 'clear.png',
'hazy': 'clear.png',
'mostlycloudy': 'full_clouds.png',
'mostlysunny': 'partly_sunny.png',
'partlycloudy': 'partly_cloudy.png',
'partlysunny': 'partly_sunny.png',
'rain': 'rain.png',
'sleet': 'sleet.png',
'snow': 'snow.png',
'sunny': 'sun.png',
'tstorms': 'rain.png'
}
    @staticmethod
    def _weather_to_desktop_bg_image_name(weather):
        # Assumes the Wunderground 'icon' field, whose values match the keys above
        return WundergroundWeatherRequester.DESKTOP_BG_NAME_FROM_ICON[weather['current_observation']['icon']]
    def cache_age(self):
        with open(self._cache_path, 'r') as infile:
            weather_json = json.load(infile)
        # If the cache holds no observation yet, treat it as infinitely stale
        if 'current_observation' not in weather_json:
            return datetime.timedelta.max
        last_update_time_str = weather_json['current_observation'][
            'local_time_rfc822']
        last_update_time = datetime.datetime.strptime(
            last_update_time_str, '%a, %d %b %Y %H:%M:%S %z')
        # Remove timezone, because datetime.now has no timezone
        last_update_time = last_update_time.replace(tzinfo=None)
        return datetime.datetime.now() - last_update_time
def _request_weather(self):
url_base = 'http://api.wunderground.com/api'
url_params = 'conditions/q/autoip.json'
request_url = '/'.join((url_base,
self._api_key,
url_params))
weather_json = requests.get(request_url).json()
        with open(self._cache_path, 'w') as outfile:
json.dump(weather_json, outfile)
return weather_json
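# A minimal usage sketch (the API key, cache path and stale time below are
# illustrative assumptions, not values defined in this module):
#
#   import datetime
#   requester = WundergroundWeatherRequester(
#       api_key='YOUR_WUNDERGROUND_KEY',
#       cache_path='/tmp/weather_cache.json',
#       stale_time=datetime.timedelta(minutes=30))
#   print(requester.get_current_desktop_bg_image_name())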
```
|
{
"source": "jeffrutledge/gitusr",
"score": 2
}
|
#### File: jeffrutledge/gitusr/setup.py
```python
import os
import stat
# Path to write shell script to
SCRIPT_PATH = "/usr/local/bin/gitusr"
#####################################################
############# Parts of the Script that do not change
#####################################################
SCRIPT_PART1 =\
"""#!/usr/bin/env bash
#
# The gitusr shell script is made for managing multiple git hub accounts.
# Type gitusr -h in the shell for a help message.
# This file may be reconfigured by rerunning the python setup script.
#
# More information can be found at: https://github.com/jeffrutledge/gitusr
while getopts ':gh"""
##################################
# getopts argument flags
##################################
SCRIPT_PART2 ="""' flag; do
case "$flag" in
g) globalFlag=true ;;"""
##################################
# email flags
##################################
SCRIPT_PART3 ="""
h) # Display Help text
echo "gitusr | Check or Set Git Email"
echo ""
echo "Usage: gitusr [options] Display git email"
echo " or: gitusr [options] [email] Set git email"
echo ""
echo "options:"
echo " -h Display this help message"
echo " -g Set global email"
echo " (Otherwise set email of current repository)"
echo ""
echo "email:" """
##################################
# help message flag descriptions
##################################
SCRIPT_PART4 ="""
echo ""
echo "More information can be found at: https://github.com/jeffrutledge/gitusr"
exit ;;
\?) error "Unexpected option $flag" ;;
esac
done
if [ "$globalFlag" = true ] ; then
git config --global user.email $email
else
git config user.email $email
fi"""
#####################################################
############# End Script Strings
#####################################################
def setup():
"""
Sets up the gitusr shell script
Configures the users emails and corresponding flags.
Then writes the shell script to FOLDER_PATH
"""
emailFlagTuples = requestSetupConfiguration()
scriptText = generateScriptText(emailFlagTuples)
writeExecutableScript(scriptText)
def requestSetupConfiguration():
"""
Requests user input for flags and emails,
then returns them as a list of tuples.
"""
print(\
' gitusr Setup\n'\
+ '----------------------------------------------------------------------\n'
+ 'You will choose which emails you would like to be able to switch to.\n'\
+ 'Each email will have a one letter character flag.\n'\
+ '\n'\
+ 'For example, your work gitHub email might use w.\n'\
+ 'Then you could switch to this email with the command $ gitusr -w\n'\
+ '\n'\
+ 'If you ever forget your flags use -h for a help message.\n'\
+ 'To change your emails, simply run this script again.\n'
+ '----------------------------------------------------------------------')
emailInput = '' # Temporarily stores user input for an email
flagInput = '' # Temporarily stores user input for a flag
emailFlagTuples = [] # Stores all Email and flag pairs, is returned
# Ask for email and flag pairs until user inputs 'done' as an email
while True:
emailInput = requestEmail()
if emailInput == 'done':
break
flagInput = requestFlag(emailInput)
emailFlagTuples.append((emailInput, flagInput))
return emailFlagTuples
def requestEmail():
"""
Request an email input.
"""
printBreak()
emailInput = input('Input an email you would like to use (or input done if you are done):\n')
return emailInput
def requestFlag(emailForFlag):
"""
Request a flag input.
"""
printBreak()
flagInput = input('Input the flag you would like to use for [{}]:'.format(emailForFlag)\
+'\n (the flag must be one alpha character '\
+'which is not h, g or already used)\n')
# Make sure the flag is valid
# one alpha character
# not used already, or h or g
while not isValidFlag(flagInput):
printBreak()
print('That was not a valid flag. \n'\
+'A valid flag must be one alpha character that is not h or g,\n'\
+'or not already be used.\n')
flagInput = input('Input the flag you would like to use for [{}]:\n'.format(emailForFlag))
return flagInput
def isValidFlag(flag, usedFlags = ['h', 'g']):
"""
Takes a string and checks to make sure it is a valid flag.
A valid flag is one alpha character and is not used already.
If flag is valid adds it to used flags list.
    By default, h and g are used flags.
"""
    if flag.isalpha() and len(flag) == 1 and flag not in usedFlags:
usedFlags.append(flag) # Add flag to used flags
return True
return False
def printBreak():
"""
Prints a break to help the user read input requests.
"""
print('\n\n----------------------------------------------------------------------\n\n')
def generateScriptText(emailFlagTuples):
"""
Generates the final bash script text,
using the given emails and flags.
"""
getoptsFlagsString = generateGetoptsFlags(emailFlagTuples)
emailFlagString = generateEmailFlags(emailFlagTuples)
helpMessageString = generateHelpMessage(emailFlagTuples)
#Assemble the full script string
scriptString = SCRIPT_PART1
scriptString += getoptsFlagsString
scriptString += SCRIPT_PART2
scriptString += emailFlagString
scriptString += SCRIPT_PART3
scriptString += helpMessageString
scriptString += SCRIPT_PART4
return scriptString
def generateGetoptsFlags(emailFlagTuples):
"""
Generates the part of the getopts argument,
which declares the flags to look for.
Uses flags in emailFlagTuples.
"""
flagsString = ''
for emailFlagTuple in emailFlagTuples:
flagsString += emailFlagTuple[1]
return flagsString
def generateEmailFlags(emailFlagTuples):
"""
Takes emailFlagTuples and generates the part of the
script case statement which accepts email flags.
"""
emailFlagString = ""
for emailFlagTuple in emailFlagTuples:
#add a new line and indent
emailFlagString += "\n "
#add flag character
emailFlagString += emailFlagTuple[1]
#close case condition and setup email variable
emailFlagString += r''') email="\"'''
#input email address
emailFlagString += emailFlagTuple[0]
#end case statement
emailFlagString += r'''\"" ;;'''
return emailFlagString
def generateHelpMessage(emailFlagTuples):
"""
Takes emailFlagTuples and generates the part of the
script that echos the help message.
"""
helpMessageString = ""
for emailFlagTuple in emailFlagTuples:
#add a new line and indent
helpMessageString += '''\n echo " -'''
#add flag
helpMessageString += emailFlagTuple[1]
#add spacing
helpMessageString += " Set email to: "
#add email
helpMessageString += emailFlagTuple[0]
#close echo quotes
helpMessageString += '''"'''
return helpMessageString
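# Illustrative sketch of what the three generators above emit for a single,
# hypothetical pair ('work@example.com', 'w') -- exact whitespace may differ:
#   generateGetoptsFlags(...) -> 'w'
#   generateEmailFlags(...)   -> a case entry roughly like: w) email="\"work@example.com\"" ;;
#   generateHelpMessage(...)  -> a help line roughly like:  echo "  -w       Set email to: work@example.com"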
def writeExecutableScript(scriptText):
"""
Writes and makes executable a script from the given string.
Writes to SCRIPT_PATH.
"""
scriptFile = open(SCRIPT_PATH, 'w')
scriptFile.write(scriptText)
# Make the script executable
st = os.stat(SCRIPT_PATH)
os.chmod(SCRIPT_PATH, st.st_mode | stat.S_IEXEC)
#call setup() on file open
if __name__ == '__main__':
setup()
```
|
{
"source": "jeffry1829/mcweb",
"score": 2
}
|
#### File: mcstats/stats/kill_mobs.py
```python
from mcstats import mcstats
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_any',
{
'title': '擊殺領域!',
'desc': '總共殺過的怪物數量',
'unit': 'int',
},
mcstats.StatReader(['minecraft:custom','minecraft:mob_kills'])
))
def create_kill_stat(mobId, title, mobText, minVersion = 0):
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_' + mobId,
{
'title': title,
'desc': '總共殺過的' + mobText + '數量',
'unit': 'int',
},
mcstats.StatReader(['minecraft:killed','minecraft:' + mobId]),
minVersion
))
# Hostiles
create_kill_stat('blaze','滅火器','烈焰神')
create_kill_stat('creeper','苦力怕主宰','苦力怕')
create_kill_stat('endermite','滅蟎專家','終界蟎')
create_kill_stat('ender_dragon','屠龍者','終界龍')
create_kill_stat('ghast','眼淚收集者','地獄幽靈')
create_kill_stat('magma_cube','岩漿冰淇淋','地獄史萊姆')
create_kill_stat('phantom','夜魅射手','夜魅',1467) # added in 18w07a
# Note: Ravagers had been added as Illager Beasts in 18w43a (1901)
# support for that snapshot may be added on demand
create_kill_stat('ravager','突襲!','劫毀獸',1930) # changed in 19w05a
create_kill_stat('shulker','開蚌專家','界伏蚌')
create_kill_stat('silverfish','可惡的小東西...','蠹魚')
create_kill_stat('slime','好黏!','史萊姆')
create_kill_stat('vex','惱鬼獵手','惱鬼')
create_kill_stat('witch','女巫獵人','女巫')
create_kill_stat('wither_skeleton','凋零骷髏獵手','凋零骷髏')
# Neutrals
create_kill_stat('bee','嗡嗡嗡','蜜蜂',2200) # added in 19w34a
create_kill_stat('dolphin','海豚獵手','海豚',1482) # added in 18w15a
create_kill_stat('enderman','安德終結者','終界使者')
create_kill_stat('iron_golem','守衛系統下線!','鐵傀儡')
create_kill_stat('panda','功夫熊貓','熊貓',1901) # added in 18w43a
create_kill_stat('polar_bear','北極熊盜獵者','北極熊')
create_kill_stat('snow_golem','反對雪地!','雪人')
create_kill_stat('zombie_pigman','地獄群架','地獄屍人')
# Passives
create_kill_stat('bat','蝙蝠獵手','蝙蝠')
create_kill_stat('chicken','烤雞手','雞')
create_kill_stat('cow','屠牛夫','牛')
create_kill_stat('horse','馬屠手','馬')
create_kill_stat('fox','フブキ フブキ フブキ!','狐狸',1932) # added in 19w07a
create_kill_stat('mooshroom','蘑菇肉愛好者','蘑菇牛')
create_kill_stat('parrot','笨鳥!','鸚鵡')
create_kill_stat('pig','屠夫','豬')
create_kill_stat('rabbit','兔子殺手','兔子')
create_kill_stat('sheep','大野狼','羊')
create_kill_stat('squid','池子清理者','烏賊')
create_kill_stat('turtle','馬力歐','烏龜',1467) # added in 18w07a
create_kill_stat('villager','獨裁者','村民')
create_kill_stat('wandering_trader','商人制裁者','流浪商人',1930) # added in 19w05a
create_kill_stat('wolf','壞狗!','狼/狗')
# Cats (including ozelots)
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_ocelot',
{
'title': '屠貓者',
'desc': '殺過的山貓/貓的數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:cat']),
mcstats.StatReader(['minecraft:killed','minecraft:ocelot']),
])
))
# Llamas (including trader llamas)
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_llama',
{
'title': '山賊',
'desc': '殺過的羊駝數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:llama']),
mcstats.StatReader(['minecraft:killed','minecraft:trader_llama']),
])
))
# Zombies (including Husks and Zombie Villagers)
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_zombie',
{
'title': '殭屍碾碎機',
'desc': '殺過的殭屍數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:husk']),
mcstats.StatReader(['minecraft:killed','minecraft:drowned']),
mcstats.StatReader(['minecraft:killed','minecraft:zombie']),
mcstats.StatReader(['minecraft:killed','minecraft:zombie_villager']),
])
))
# Skeletons (including Strays)
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_skeleton',
{
'title': '骨頭收集家',
'desc': '殺過的骷髏弓箭手數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:skeleton']),
mcstats.StatReader(['minecraft:killed','minecraft:stray']),
])
))
# Spiders (including Cave Spiders)
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_spider',
{
'title': '蜘蛛恐懼症',
'desc': '殺過的蜘蛛數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:spider']),
mcstats.StatReader(['minecraft:killed','minecraft:cave_spider']),
])
))
# Guardians (including Elder Guardians)
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_guardian',
{
'title': '深海突襲者',
'desc': '殺過的深海守衛數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:guardian']),
mcstats.StatReader(['minecraft:killed','minecraft:elder_guardian']),
])
))
# Illagers (all types)
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_illagers',
{
'title': '清除者',
'desc': '殺過的窳民數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:evoker']),
mcstats.StatReader(['minecraft:killed','minecraft:vindicator']),
mcstats.StatReader(['minecraft:killed','minecraft:pillager']),
mcstats.StatReader(['minecraft:killed','minecraft:illusioner']),
mcstats.StatReader(['minecraft:killed','minecraft:illager_beast']),
])
))
# Fish mobs
mcstats.registry.append(
mcstats.MinecraftStat(
'kill_fish',
{
'title': '捕魚專家',
'desc': '殺過的魚的數量',
'unit': 'int',
},
mcstats.StatSumReader([
mcstats.StatReader(['minecraft:killed','minecraft:cod']),
mcstats.StatReader(['minecraft:killed','minecraft:salmon']),
mcstats.StatReader(['minecraft:killed','minecraft:pufferfish']),
mcstats.StatReader(['minecraft:killed','minecraft:tropical_fish']),
]),
1471 # fish mobs added in 18w08b
))
```
|
{
"source": "jeffryang24/dabadee-with-python",
"score": 2
}
|
#### File: Automate the Boring Stuff/Basic/myFunction.py
```python
def hello(name):
print('Hello, ' + name + '!')
hello('World')
hello('Morgan')
```
#### File: Automate the Boring Stuff/Basic/tryExcept.py
```python
def spam(dividedBy):
return 100 / dividedBy
try:
print(spam(20))
print(spam(30))
print(spam(0))
except ZeroDivisionError as e:
print('Oops... Don\'t do that!')
```
|
{
"source": "jeffryang24/rabbitmq-learning",
"score": 3
}
|
#### File: work-queues/python/worker.py
```python
import time
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
# queue_declare is idempotent - so, it will not
# create the same queue again if has been existed.
channel.queue_declare(queue='task_queue', durable=True)
def callback(ch, method, properties, body):
# use decode to remove b'' (bytes literal)
print(" [x] Received %r" % body.decode('UTF-8'))
time.sleep(body.count(b'.'))
print(" [x] Done")
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
# Receive message
channel.basic_consume(
callback,
    queue='task_queue'
)
print(' [x] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
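# A minimal matching producer sketch (the queue name mirrors the declaration
# above; the message body is illustrative):
#
#   import pika
#   connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
#   channel = connection.channel()
#   channel.queue_declare(queue='task_queue', durable=True)
#   channel.basic_publish(exchange='',
#                         routing_key='task_queue',
#                         body='A task that takes four seconds....',
#                         properties=pika.BasicProperties(delivery_mode=2))
#   connection.close()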
```
|
{
"source": "JeffryCA/subgroupsem",
"score": 3
}
|
#### File: Examples/European Social Survey/Preprocessing.py
```python
import pandas as pd
import numpy as np
from factor_analyzer import FactorAnalyzer
############################################################################################
# import and preprocess data
############################################################################################
# Variable descriptions:
# https://www.europeansocialsurvey.org/docs/round3/survey/ESS3_appendix_a3_e01_0.pdf
data_raw = pd.read_stata('ESS3/ESS3e03_7.dta', convert_categoricals=False)
weights = data_raw[['dweight', 'pspwght', 'pweight', 'cntry']] # Desig-, Post-stratification-, Population Size weight
############################################################################################
# choosing variables and combining to create scores
############################################################################################
data = data_raw[['cntry', 'happy', 'rlgatnd', 'rlgdgr', 'trtrsp', 'trtunf', 'rcndsrv','agea', 'maritala', 'uempla', 'uempli', 'hincfel', 'edulvla', 'rlgblg', 'rlgdnm', 'trstlgl', 'livecntr']] # ,'health'
print('Raw Size: ', data.shape)
data = data.dropna(subset=['cntry', 'happy', 'rlgatnd', 'rlgdgr', 'trtrsp', 'trtunf', 'rcndsrv'])
print('Size after dropping missing values: ', data.shape)
data['rlgatnd'] = max(data['rlgatnd']) - data['rlgatnd'] # invert scales
data['trtunf'] = max(data['trtunf']) - data['trtunf'] # invert scales
############################################################################################
# CFA to combine variables
############################################################################################
from factor_analyzer import (ConfirmatoryFactorAnalyzer, ModelSpecificationParser)
df = data[['rlgatnd','rlgdgr','trtrsp','trtunf','rcndsrv']]
model_dict = {'religionsity':['rlgatnd','rlgdgr'], 'recognition':['trtrsp','trtunf' ,'rcndsrv']}
model_spec = ModelSpecificationParser.parse_model_specification_from_dict(df, model_dict)
cfa = ConfirmatoryFactorAnalyzer(model_spec, disp=False)
cfa.fit(df.values)
print("Factor loadings:\n", cfa.loadings_)
latent_factors = cfa.transform(df.values)
data['religiosity'] = latent_factors[:,0]
data['recognition'] = latent_factors[:,1]
############################################################################################
# create country religiosity score
############################################################################################
df_weights_cntry = weights.groupby('cntry', as_index=False)['pweight'].mean()
df_reli_cntry = data.groupby('cntry', as_index=False)['religiosity'].mean()
MEAN_CNTRY_RELI = np.sum( df_weights_cntry['pweight'] * df_reli_cntry['religiosity'] / np.sum(df_weights_cntry['pweight']) )
#robjects.globalenv['Mod_Mean'] = MEAN_CNTRY_RELI
print('Country Religiosity Average', MEAN_CNTRY_RELI)
data = data.join(df_reli_cntry.set_index('cntry'), on='cntry', how='left', rsuffix='_cntry')
# center data
data[['recognition', 'religiosity', 'happy']] = data[['recognition', 'religiosity', 'happy']].apply(lambda x: x - x.mean())
############################################################################################
# generate interaction terms (for moderation)
############################################################################################
data['religiosity_x_religiosity_cntry'] = data['religiosity'] * data['religiosity_cntry']
data['recognition_x_religiosity_cntry'] = data['recognition'] * data['religiosity_cntry']
############################################################################################
# change respective columns to categorical ones
############################################################################################
to_str = ['maritala', 'uempla', 'uempli', 'hincfel', 'edulvla', 'rlgblg', 'rlgdnm', 'livecntr'] #'health',
for attr in to_str:
data[attr] = data[attr].apply(str)
data.to_pickle('ess_preprocessed_data.pkl') # save data
############################################################################################
### Test cronbach's alpha
############################################################################################
print('Cronbachs alpha')
def CronbachAlpha(itemscores):
itemscores = np.asarray(itemscores)
itemvars = itemscores.var(axis=0, ddof=1)
tscores = itemscores.sum(axis=1)
nitems = itemscores.shape[1]
return (nitems / (nitems-1)) * (1 - (itemvars.sum() / tscores.var(ddof=1)))
religiosity = data[['rlgatnd', 'rlgdgr']].values
recognition = data[['trtrsp', 'trtunf', 'rcndsrv']].values
print('religiosity: ', CronbachAlpha(religiosity))
print('recognition: ', CronbachAlpha(recognition))
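# A tiny worked example of the formula above (numbers are illustrative):
#   two perfectly correlated items -> all variance is shared, so alpha is 1.0
#   CronbachAlpha(np.array([[1, 2], [2, 3], [3, 4]]))  # -> 1.0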
```
#### File: subgroup_sem/tests/test_RtoPy.py
```python
import unittest
from rpy2.robjects import r, pandas2ri
from rpy2 import robjects
from rpy2.robjects.conversion import localconverter
import numpy as np
import pandas as pd
from subgroup_sem.tests.DataSets import get_artificial_data
class TestRtoPy(unittest.TestCase):
def test_conncection(self):
robjects.globalenv['vector'] = robjects.IntVector([1, 0, 1, 0, 1, 1, 1])
self.assertEqual(list(robjects.r['vector']), [1, 0, 1, 0, 1, 1, 1])
def test_convert_df(self):
data = get_artificial_data()
with localconverter(robjects.default_converter + pandas2ri.converter):
robjects.globalenv['d'] = robjects.conversion.py2rpy(data)
data_R = robjects.conversion.rpy2py( robjects.r['d'] )
        self.assertTrue(data.equals(data_R))
if __name__ == '__main__':
unittest.main()
```
#### File: subgroup_sem/tests/test_TestQF.py
```python
from timeit import default_timer as timer
import pysubgroup as ps
from subgroup_sem import SEMTarget, TestQF
from subgroup_sem.tests.DataSets import get_artificial_data
if __name__ == '__main__':
data = get_artificial_data()
model = ('# direct effect \n'
'Y ~ c(c1,c2)*X \n'
'# mediator \n'
'M ~ c(a1,a2)*X \n'
'Y ~ c(b1,b2)*M \n'
'# indirect effect (a*b) \n'
'indirect1 := a1*b1 \n'
'indirect2 := a2*b2 \n'
'# total effect \n'
'total1 := c1 + (a1*b1) \n'
'total2 := c2 + (a2*b2) \n'
'# direct effect \n'
'direct1 := c1 \n'
'direct2 := c2 \n'
'# rest \n'
'Y ~~ c(r1_1,r1_2)*Y \n'
'X ~~ c(r2_1,r2_2)*X \n'
'M ~~ c(r3_1,r3_2)*M \n'
'Y ~ c(r4_1,r4_2)*1 \n'
'X ~ c(r5_1,r5_2)*1 \n'
'M ~ c(r6_1,r6_2)*1')
    wald_test_constraints = 'a1==a2 \n b1==b2 \n c1==c2'
    target = SEMTarget(data, model, wald_test_constraints)
searchSpace = ps.create_selectors(data, ignore=["X", "Y", "M"])
def q(WT_score):
return WT_score
task = ps.SubgroupDiscoveryTask(data, target, searchSpace, result_set_size=10, depth=2, qf=TestQF(q, ['WT_score']))
print("running DFS")
start = timer()
result = ps.SimpleDFS().execute(task)
end = timer()
print("Time elapsed: ", (end - start))
for (q, sg) in result.to_descriptions():
print(str(q) + ":\t" + str(sg))
```
|
{
"source": "jeffrylazo/javicho",
"score": 4
}
|
#### File: javicho/src/core.py
```python
import numpy
import pandas
import logging
from os import path
from random import choices, random
# Definition of class Data for simple actions on DataFrames
class Data():
'''
Summary: This class generates a data frame with user-defined parameters, or an existing file.
Examples: ds = Data([name,numRecords,columns,caseSet,dependance,probability])
ds = Data([name,numRecords,columns,caseSet])
ds = Data([name,r'inputPath'])
Required elements for generating the data frame within the parameter list:
1. name (List or Tuple) -> Used for default file deneration
2. numRecords (List or Tuple) -> Number of records
3. columns (List or Tuple) -> Names of the columns
4. caseSet (List or Tuple) -> Options available per column
Optional elements for generating the data frame within the parameter list:
1. dependance (List or Tuple) -> Dependance of a column controlled thru the index of the independent column
2. probability (List or Tuple) -> Weights for the randomized selection of caseSet value selection
Required attributes for loading a dataset:
1. inputPath (String) -> Path of the .cvs, .html, or .xlsx file
'''
def __init__(self, parameter = [None], *args):
'''Constructor'''
# Attribute initialization
self.__parameter = parameter
self.__df = pandas.DataFrame(numpy.zeros(1))
self.__dfTrain = None
self.__dfTest = None
try:
# Check if self.__parameter is a list with 2, 4, or 6 elements and initialize accordingly
if (isinstance(self.__parameter, list)):
if (len(self.__parameter) == 2):
self.__name = str(self.__parameter[0])
self.__inputPath = self.__parameter[1]
if ((len(self.__parameter) == 4) or (len(self.__parameter) == 6)):
self.__name = str(self.__parameter[0])
self.__numRecords = self.__parameter[1]
self.__columns = self.__parameter[2]
self.__caseSet = self.__parameter[3]
# The "None" keyword avoids errors on attributes if the user do not initialize them
self.__dependance = None
self.__probability = None
if (len(self.__parameter) == 6):
self.__dependance = self.__parameter[4]
self.__probability = self.__parameter[5]
# Generate main dataset
self.__generate_main()
# If it fails, log the error
else:
logging.error(" The constructor needs to be initialized as a list containing either 2, 3, or 6 parameters")
# If the initialization generates the following exceptions, log the error
except (TypeError, SyntaxError):
logging.error(" The constructor needs to be initialized as a list containing either 2, 3, or 6 parameters")
def __generate_main(self):
'''
Private method: Generates main data frame based on the parameters obtained from the constructor.
'''
# If there is no predefined data frame, create it from scratch using the parameters on the overloaded constructor
if ((len(self.__parameter) == 4) or (len(self.__parameter) == 6)):
self.__df = pandas.DataFrame(index=range(self.__numRecords),columns=self.__columns)
for header in range(len(self.__columns)):
if(isinstance(self.__caseSet[header],int)):
self.__df[self.__columns[header]] = pandas.Series(round(random()*self.__caseSet[header],0) for record in range(self.__numRecords))
elif(isinstance(self.__caseSet[header],float)):
self.__df[self.__columns[header]] = pandas.Series(round(random()*self.__caseSet[header],2) for record in range(self.__numRecords))
else:
if(self.__dependance[header] == -1):
if(isinstance(self.__caseSet[header],(tuple,list))):
self.__df[self.__columns[header]] = pandas.Series(str(choices(self.__caseSet[header])[0]) for record in range(self.__numRecords))
else:
for record in range(self.__numRecords):
self.__df.iloc[record][self.__columns[header]] = str(choices(self.__caseSet[header],weights=self.__probability[header],k=1)[0])
self.__save_index()
# If there is a predefined data frame, do not do anything
elif (isinstance(self.__parameter[1], pandas.core.frame.DataFrame)):
self.__df = self.__parameter[1]
# Otherwise, load it from the desired location
else:
# Check if the path exists
if (path.isfile(self.__inputPath)):
# Check for the following file formats
if (self.__parameter != self.__inputPath):
root, ext = path.splitext(self.__inputPath)
if (ext == '.csv'):
self.__df = pandas.read_csv(self.__inputPath)
                    if (ext == '.html'):
self.__df = pandas.read_html(self.__inputPath)
if (ext == '.xlsx'):
self.__df = pandas.read_excel(self.__inputPath,sheet_name='Sheet1')
self.__save_index()
# Display the following log if the path exists, but the object was initialized incorrectly
else:
logging.info(" Valid Path. Incorrect object initialization. Try df = Data([name,numRecords,columns,caseSet,dependance,probability]) or df = Data([name,r'inputPath'])")
            # Log the error if the path does not exist
            else:
                logging.error(" Invalid path. " + str(self.__inputPath) + " does not exist")
def __save_index(self):
'''
Private method: Generates a specialized column for index.
'''
# Create a new Series on the data frame and store it temporary on another location
temp = self.__df['Record'] = self.__df.index
# Eliminate the Series on the data frame, permanently
self.__df.drop(columns='Record', inplace = True)
# Set the temporary Series as first column on the data frame
self.__df.insert(0,'Record',temp)
def gen_train_test(self,percentage = 0.2):
'''
        Abstract: Generate random training and test subsets from main data frame.
Example: ds1.train_test(0.3)
'''
# Check if percentage is a decimal
if (percentage > 0):
if (percentage > 1 and percentage < 100):
percentage = percentage / 100
# Generate sampling data frame and sort index in ascending order
self.__dfTrain = self.__df.sample(frac = percentage, replace=False,random_state=1)
self.__dfTrain = self.__dfTrain.sort_index()
# Generate testing data frame
self.__dfTest = self.__df[~self.__df.isin(self.__dfTrain)]
self.__dfTest = self.__dfTest.dropna()
self.__dfTest['returned'] = ' '
temp = self.__dfTest['Record'] = self.__dfTest.index
self.__dfTest.drop(columns='Record', inplace = True)
self.__dfTest.insert(0,'Record',temp)
    def __file_name(self, desired_path = None, key = False):
        '''
        Private method: Generates names for csv files.
        '''
        # A full file name was supplied directly, so use it as-is
        if ((desired_path != None) and (key == False)):
            return [desired_path, desired_path, desired_path]
        new_name = [None for i in range(3)]
        if (desired_path == None):
            old_name = self.__name
        else:
            old_name = path.splitext(desired_path)[0]
        new_name[0] = str(old_name) + '_main_data.csv'
        new_name[1] = str(old_name) + '_train_data.csv'
        new_name[2] = str(old_name) + '_test_data.csv'
        return new_name
def save_main(self, desired_path = None, key = False):
'''
Abstract: Store main data frame on a .csv file. If path = None, default path is used.
Example: ds1.save_main(r'TrainSet.csv')
'''
self.__df.to_csv(self.__file_name(desired_path, key)[0],index = False, header = True)
def save_train(self, desired_path = None, key = False):
'''
Abstract: Store training data frame on a .csv file. If path = None, default path is used.
Example: ds1.save_train(r'TrainSet.csv')
'''
self.__dfTrain.to_csv(self.__file_name(desired_path, key)[1],index = False, header = True)
def save_test(self, desired_path = None, key = False):
'''
Abstract: Store test data frame on a .csv file. If path = None, default path is used.
Example: ds1.save_test(r'TestSet.csv')
'''
self.__dfTest.to_csv(self.__file_name(desired_path, key)[2],index = False, header = True)
def save_all(self, desired_path = None):
'''
Abstract: Store data frame on three separate .csv files using default paths.
        Example: ds1.save_all()
'''
self.save_main(desired_path, True)
self.save_train(desired_path, True)
self.save_test(desired_path, True)
def get_data(self):
'''
Abstract: Returns data frames as three separate objects.
Example: x,y,z = ds1.get_data()
'''
return self.__df, self.__dfTrain, self.__dfTest
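# A minimal usage sketch (column names, case sets and record count below are
# illustrative assumptions, not values prescribed by this module):
#
#   columns = ['age', 'score', 'segment']
#   case_set = [90, 10.0, ['A', 'B', 'C']]
#   dependance = [-1, -1, -1]
#   probability = [None, None, None]
#   ds = Data(['customers', 100, columns, case_set, dependance, probability])
#   ds.gen_train_test(0.2)
#   ds.save_all()
#   main_df, train_df, test_df = ds.get_data()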
```
|
{
"source": "JeffryLee/dash-nb.js",
"score": 2
}
|
#### File: dash-nb.js/abr_server/simple_server.py
```python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import base64
import urllib
import sys
import os
import json
import time
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
import time
import itertools
################## ROBUST MPC ###################
S_INFO = 5 # bit_rate, buffer_size, rebuffering_time, bandwidth_measurement, chunk_til_video_end
S_LEN = 8 # take how many frames in the past
MPC_FUTURE_CHUNK_COUNT = 5
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BITRATE_REWARD = [1, 2, 3, 12, 15, 20]
BITRATE_REWARD_MAP = {0: 0, 300: 1, 750: 2, 1200: 3, 1850: 12, 2850: 15, 4300: 20}
M_IN_K = 1000.0
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
TOTAL_VIDEO_CHUNKS = 48
DEFAULT_QUALITY = 0 # default video quality without agent
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> this number of Mbps
SMOOTH_PENALTY = 1
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
RANDOM_SEED = 42
RAND_RANGE = 1000
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
# in format of time_stamp bit_rate buffer_size rebuffer_time video_chunk_size download_time reward
NN_MODEL = None
CHUNK_COMBO_OPTIONS = []
# past errors in bandwidth
past_errors = []
past_bandwidth_ests = []
def get_chunk_size(quality, index):
return 1000
def make_request_handler(input_dict):
class Request_Handler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.input_dict = input_dict
self.log_file = input_dict['log_file']
#self.saver = input_dict['saver']
self.s_batch = input_dict['s_batch']
#self.a_batch = input_dict['a_batch']
#self.r_batch = input_dict['r_batch']
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = json.loads(self.rfile.read(content_length))
print post_data
return_code = -2
if (post_data['playerId'] == post_data['currentPlayerIdx']):
return_code = len(post_data['nextChunkSize'])-1
if (post_data['playerId'] == (post_data['currentPlayerIdx']+1)%8):
if (post_data['lastRequest'] < 3):
return_code = len(post_data['nextChunkSize'])-1
if (post_data['lastRequest'] < 0):
return_code = len(post_data['nextChunkSize'])-1
send_data = str(return_code)
print "return " + send_data
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data)
# record [state, action, reward]
# put it here after training, notice there is a shift in reward storage
# if end_of_video:
# self.s_batch = [np.zeros((S_INFO, S_LEN))]
# else:
# self.s_batch.append(state)
def do_GET(self):
print >> sys.stderr, 'GOT REQ'
self.send_response(200)
#self.send_header('Cache-Control', 'Cache-Control: no-cache, no-store, must-revalidate max-age=0')
self.send_header('Cache-Control', 'max-age=3000')
self.send_header('Content-Length', 20)
self.end_headers()
self.wfile.write("console.log('here');")
def log_message(self, format, *args):
return
return Request_Handler
def run(server_class=HTTPServer, port=8333, log_file_path=LOG_FILE):
np.random.seed(RANDOM_SEED)
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
# make chunk combination options
for combo in itertools.product([0,1,2,3,4,5], repeat=5):
CHUNK_COMBO_OPTIONS.append(combo)
with open(log_file_path, 'wb') as log_file:
s_batch = [np.zeros((S_INFO, S_LEN))]
last_bit_rate = DEFAULT_QUALITY
last_total_rebuf = 0
# need this storage, because observation only contains total rebuffering time
# we compute the difference to get
video_chunk_count = 0
input_dict = {'log_file': log_file,
'last_bit_rate': last_bit_rate,
'last_total_rebuf': last_total_rebuf,
'video_chunk_coount': video_chunk_count,
's_batch': s_batch}
# interface to abr_rl server
handler_class = make_request_handler(input_dict=input_dict)
server_address = ('localhost', port)
httpd = server_class(server_address, handler_class)
print 'Listening on port ' + str(port)
httpd.serve_forever()
def main():
if len(sys.argv) == 2:
trace_file = sys.argv[1]
run(log_file_path=LOG_FILE + '_robustMPC_' + trace_file)
else:
run()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "Keyboard interrupted."
try:
sys.exit(0)
except SystemExit:
os._exit(0)
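# Illustrative request against the default port (the field values are made up;
# do_POST only reads playerId, currentPlayerIdx, lastRequest and nextChunkSize):
#
#   curl -X POST http://localhost:8333 \
#     -d '{"playerId": 1, "currentPlayerIdx": 1, "lastRequest": 2,
#          "nextChunkSize": [100, 200, 300, 400, 500, 600]}'
#   # -> plain-text reply "5" (len(nextChunkSize) - 1), per the logic above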
```
|
{
"source": "jeffrypaul37/Hospital-Management-System",
"score": 3
}
|
#### File: jeffrypaul37/Hospital-Management-System/appointments.py
```python
from tkinter import *
from tkcalendar import Calendar
from datetime import datetime
from datetime import date
import re
import sqlite3
import tkinter.messagebox
import pandas as pd
import datetime
from dateutil import rrule, parser
today = date.today()
date1 = '05-10-2021'
date2 = '12-31-2050'
datesx = list(rrule.rrule(rrule.DAILY, dtstart=parser.parse(date1), until=parser.parse(date2)))
conn = sqlite3.connect('database copy.db')
c = conn.cursor()
ids = []
class Application:
def __init__(self, master):
self.master = master
self.left = Frame(master, width=800, height=720, bg='sea green')
self.left.pack(side=LEFT)
self.right = Frame(master, width=400, height=720, bg='steelblue')
self.right.pack(side=RIGHT)
self.heading = Label(self.left, text="Histolab Appointments", font=('arial 40 bold'), fg='black', bg='sea green')
self.heading.place(x=0, y=0)
self.name = Label(self.left, text="Patient's Name", font=('arial 18 bold'), fg='black', bg='sea green')
self.name.place(x=0, y=100)
self.age = Label(self.left, text="Age", font=('arial 18 bold'), fg='black', bg='sea green')
self.age.place(x=0, y=140)
self.gender = Label(self.left, text="Gender", font=('arial 18 bold'), fg='black', bg='sea green')
self.gender.place(x=0, y=180)
self.location = Label(self.left, text="Location", font=('arial 18 bold'), fg='black', bg='sea green')
self.location.place(x=0, y=220)
self.date = Label(self.left, text="Appointment Date", font=('arial 18 bold'), fg='black', bg='sea green')
self.date.place(x=0, y=260)
self.time = Label(self.left, text="Appointment Time", font=('arial 18 bold'), fg='black', bg='sea green')
self.time.place(x=0, y=300)
self.phone = Label(self.left, text="Phone Number", font=('arial 18 bold'), fg='black', bg='sea green')
self.phone.place(x=0, y=340)
self.name_ent = Entry(self.left, width=30)
self.name_ent.place(x=250, y=100)
self.age_ent = Entry(self.left, width=30)
self.age_ent.place(x=250, y=140)
self.clicked=StringVar()
self.clicked.set("Male")
self.gender_ent = OptionMenu(self.left,self.clicked,*options)
self.gender_ent.pack()
self.gender_ent.place(x=250, y=180)
self.location_ent=Entry(self.left,width=30)
self.location_ent.place(x=250, y=220)
self.clicked1=StringVar()
self.clicked1.set(today)
self.date_ent = OptionMenu(self.left,self.clicked1,*options1)
self.date_ent.pack()
self.date_ent.place(x=250, y=260)
self.clicked2=StringVar()
self.clicked2.set("10am-11am")
self.time_ent = OptionMenu(self.left,self.clicked2,*options2)
self.time_ent.pack()
self.time_ent.place(x=250, y=300)
self.phone_ent = Entry(self.left, width=30)
self.phone_ent.place(x=250, y=340)
self.submit = Button(self.left, text="Add Appointment", width=20, height=2, bg='steelblue', command=self.add_appointment)
self.submit.place(x=300, y=380)
sql2 = "SELECT ID FROM appointments"
self.result = c.execute(sql2)
for self.row in self.result:
self.id = self.row[0]
ids.append(self.id)
self.new = sorted(ids)
self.final_id = self.new[len(ids)-1]
self.logs = Label(self.right, text="Logs", font=('arial 28 bold'), fg='white', bg='steelblue')
self.logs.place(x=0, y=0)
self.box = Text(self.right, width=50, height=40)
self.box.place(x=20, y=60)
def add_appointment(self):
self.val1 = self.name_ent.get()
self.val2 = self.age_ent.get()
self.val3 = self.clicked.get()
self.val4 = self.location_ent.get()
self.val5 = self.clicked1.get()
self.val6 = self.clicked2.get()
self.val7 = self.phone_ent.get()
pattern=re.compile("[7-9][0-9]{9}")
pattern2=re.compile("[1-9]([0-9])*")
pattern1=re.compile(r'([A-Z])(\s*[A-Z])*$')
pattern.match(self.val7)
if self.val1 == '' or self.val2 == '' or self.val3 == '' or self.val4 == '' or self.val5 == '' or self.val6=='' or self.val7=='':
print("ty",self.val3)
tkinter.messagebox.showinfo("Warning", "Please Fill Up All Boxes")
print(self.val3)
elif not(pattern1.match(self.val1)) or len(self.val1)<2:
tkinter.messagebox.showinfo("Warning","INVALID Name")
elif not(pattern2.match(self.val2)) or len(self.val2)>=3:
tkinter.messagebox.showinfo("Warning","INVALID Age")
elif not(pattern.match(self.val7)) or len(self.val7)>10:
tkinter.messagebox.showinfo("Warning", "INVALID Phone Number")
else:
sql = "INSERT INTO 'appointments' (name, age, gender, location, scheduled_time, phone,date) VALUES(?, ?, ?, ?, ?, ?,?)"
c.execute(sql, (self.val1, self.val2, self.val3, self.val4, self.val6, self.val7,self.val5))
conn.commit()
tkinter.messagebox.showinfo("Success", "Appointment for " + str(self.val1) + " has been created" )
self.box.insert(END, '\n Appointment fixed for ' + str(self.val1) + '\n at ' + str(self.val5)+','+str(self.val6))
self.name_ent.delete(0,END)
self.age_ent.delete(0,END)
self.location_ent.delete(0,END)
self.phone_ent.delete(0,END)
root = Tk()
root.geometry("1200x720+0+0")
options=["Male","Female"]
options1=datesx
options2=["10am-11am","11am-12pm","1pm-2pm"]
b = Application(root)
root.resizable(False, False)
root.mainloop()
```
#### File: jeffrypaul37/Hospital-Management-System/mainmenu.py
```python
from tkinter import *
from tkcalendar import Calendar
from datetime import datetime
from datetime import date
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import askyesno
import re
import sqlite3
import tkinter.messagebox
import pandas as pd
import datetime
from dateutil import rrule, parser
today = date.today()
date1 = '05-10-2021'
date2 = '12-31-2050'
datesx = pd.date_range(today, date2).tolist()
conn = sqlite3.connect('database copy.db')
c = conn.cursor()
ids = []
class Application:
def __init__(self, master):
self.master = master
self.left = Frame(master, width=1000, height=800, bg='sea green')
self.left.pack(side=LEFT)
self.right = Frame(master, width=1000, height=800, bg='steelblue')
self.right.pack(side=RIGHT)
self.heading = Label(self.left, text="Appointments", font=('arial 40 bold'), fg='black', bg='sea green')
self.heading.place(x=0, y=0)
self.name = Label(self.left, text="Patient's Name", font=('arial 18 bold'), fg='black', bg='sea green')
self.name.place(x=0, y=100)
self.age = Label(self.left, text="Age", font=('arial 18 bold'), fg='black', bg='sea green')
self.age.place(x=0, y=140)
self.gender = Label(self.left, text="Gender", font=('arial 18 bold'), fg='black', bg='sea green')
self.gender.place(x=0, y=180)
self.location = Label(self.left, text="Location", font=('arial 18 bold'), fg='black', bg='sea green')
self.location.place(x=0, y=220)
self.date = Label(self.left, text="Appointment Date", font=('arial 18 bold'), fg='black', bg='sea green')
self.date.place(x=0, y=260)
self.time = Label(self.left, text="Appointment Time", font=('arial 18 bold'), fg='black', bg='sea green')
self.time.place(x=0, y=300)
self.phone = Label(self.left, text="Phone Number", font=('arial 18 bold'), fg='black', bg='sea green')
self.phone.place(x=0, y=340)
self.allergies = Label(self.left, text="Allergies", font=('arial 18 bold'), fg='black', bg='sea green')
self.allergies.place(x=0, y=380)
self.all_ent = Entry(self.left, width=30)
self.all_ent.place(x=250, y=380)
self.all_ent.insert(0, 'NONE')
self.chronic = Label(self.left, text="Chronic Conditions", font=('arial 18 bold'), fg='black', bg='sea green')
self.chronic.place(x=0, y=420)
self.chr_ent = Entry(self.left, width=30)
self.chr_ent.place(x=250, y=420)
self.chr_ent.insert(0, 'NONE')
self.bg = Label(self.left, text="Blood Group", font=('arial 18 bold'), fg='black', bg='sea green')
self.bg.place(x=0, y=460)
self.clicked3=StringVar()
self.clicked3.set("Select Blood Group")
self.bg_ent = OptionMenu(self.left,self.clicked3,*options3)
self.bg_ent.pack()
self.bg_ent.place(x=250, y=460)
self.name_ent = Entry(self.left, width=30)
self.name_ent.place(x=250, y=100)
self.age_ent = Entry(self.left, width=30)
self.age_ent.place(x=250, y=140)
self.clicked=StringVar()
self.clicked.set("Male")
self.gender_ent = OptionMenu(self.left,self.clicked,*options)
self.gender_ent.pack()
self.gender_ent.place(x=250, y=180)
self.location_ent=Entry(self.left,width=30)
self.location_ent.place(x=250, y=220)
self.clicked1=StringVar()
self.clicked1.set("Select Date")
self.date_ent = OptionMenu(self.left,self.clicked1,*options1)
self.date_ent.pack()
self.date_ent.place(x=250, y=260)
self.clicked2=StringVar()
self.clicked2.set("Select Time")
self.time_ent = OptionMenu(self.left,self.clicked2,*options2)
self.time_ent.pack()
self.time_ent.place(x=250, y=300)
self.phone_ent = Entry(self.left, width=30)
self.phone_ent.place(x=250, y=340)
self.submit = Button(self.left, text="Add Appointment", width=20, height=2, bg='steelblue', command=self.add_appointment)
self.submit.place(x=270, y=500)
self.submit = Button(self.left, text="View Appointments", width=20, height=2, bg='steelblue', command=self.view)
self.submit.place(x=600, y=100)
self.submit = Button(self.left, text="View/Update Patient Details", width=20, height=2, bg='steelblue', command=self.update)
self.submit.place(x=600, y=200)
self.submit = Button(self.left, text="Read Names", width=20, height=2, bg='steelblue', command=self.read)
self.submit.place(x=600, y=300)
self.submit = Button(self.left, text="Exit", width=20, height=2, bg='steelblue', command=self.quit)
self.submit.place(x=600, y=400)
sql2 = "SELECT ID FROM appointments"
self.result = c.execute(sql2)
for self.row in self.result:
self.id = self.row[0]
ids.append(self.id)
self.new = sorted(ids)
self.final_id = self.new[len(ids)-1]
self.logs = Label(self.right, text="Logs", font=('arial 28 bold'), fg='white', bg='steelblue')
self.logs.place(x=0, y=0)
self.box = Text(self.right, width=62, height=45)
self.box.place(x=20, y=60)
def add_appointment(self):
self.val1 = self.name_ent.get()
self.val2 = self.age_ent.get()
self.val3 = self.clicked.get()
self.val4 = self.location_ent.get()
self.val5 = self.clicked1.get()
self.val6 = self.clicked2.get()
self.val7 = self.phone_ent.get()
self.val8 = self.all_ent.get()
self.val9 = self.chr_ent.get()
self.val10 = self.clicked3.get()
        pattern=re.compile("[7-9][0-9]{9}")
pattern2=re.compile("[1-9]([0-9])*")
pattern1=re.compile(r'([A-Z])(\s*[A-Z])*$')
pattern.match(self.val7)
if self.val1 == '' or self.val2 == '' or self.val3 == '' or self.val4 == '' or self.val5 == '' or self.val6=='' or self.val7=='' or self.val10=='Select Blood Group' or self.val5=='Select Date' or self.val6=='Select Time':
print("ty",self.val3)
tkinter.messagebox.showinfo("Warning", "Please Fill Up All Boxes")
print(self.val3)
elif not(pattern1.match(self.val1)) or len(self.val1)<2:
tkinter.messagebox.showinfo("Warning","INVALID Name")
elif not(pattern2.match(self.val2)) or len(self.val2)>=3:
tkinter.messagebox.showinfo("Warning","INVALID Age")
elif not(pattern.match(self.val7)) or len(self.val7)>10:
tkinter.messagebox.showinfo("Warning", "INVALID Phone Number")
else:
sql = "INSERT INTO 'appointments' (name, age, gender, location, scheduled_time, phone,date,Allergies,Chronic_Conditions,Blood_Group) VALUES(?, ?, ?, ?, ?, ?,?,?,?,?)"
c.execute(sql, (self.val1, self.val2, self.val3, self.val4, self.val6, self.val7,self.val5,self.val8,self.val9,self.val10))
conn.commit()
tkinter.messagebox.showinfo("Success", "Appointment for " + str(self.val1) + " has been created" )
self.box.insert(END, '\n Appointment fixed for ' + str(self.val1) + '\n at ' + str(self.val5)+','+str(self.val6))
self.name_ent.delete(0,END)
self.age_ent.delete(0,END)
self.location_ent.delete(0,END)
self.phone_ent.delete(0,END)
self.clicked1.set("Select Date")
self.clicked2.set("Select Time")
self.clicked3.set("Select Blood Group")
self.chr_ent.delete(0,END)
self.all_ent.delete(0,END)
self.all_ent.insert(0, 'NONE')
self.chr_ent.insert(0, 'NONE')
def view(self):
import view
view.call()
def update(self):
import update
update.buildupdate()
def read(self):
import read
read.buildread()
def quit(self):
answer = askyesno(title='Confirm Exit', message='Are you sure you want to exit?')
if answer:
root.destroy()
root = Tk()
root.title("Shalom Clinic")
#root.geometry("1200x720+0+0")
root.attributes('-fullscreen', True)
root.resizable(0, 0)
Top = Frame(root, bd=1, relief=RIDGE)
Top.pack(side=TOP, fill=X)
Form = Frame(root, height=1)
Form.pack(side=TOP, pady=1)
lbl_title = Label(Top, text = "Shalom Clinic", font=('arial', 15))
lbl_title.pack(fill=X)
options=["Male","Female"]
options1=datesx
options2=["10:00:00","11:00:00","13:00:00"]
options3=["O+","O-","A+","A-","B+","B-","AB+","AB-"]
b = Application(root)
root.resizable(False, False)
root.mainloop()
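# The sqlite schema is not defined in this file; a plausible "appointments"
# table matching the INSERT and SELECT statements above would be (column
# types are assumptions):
#
#   CREATE TABLE appointments (
#       ID INTEGER PRIMARY KEY AUTOINCREMENT,
#       name TEXT, age INTEGER, gender TEXT, location TEXT,
#       scheduled_time TEXT, phone TEXT, date TEXT,
#       Allergies TEXT, Chronic_Conditions TEXT, Blood_Group TEXT
#   );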
```
|
{
"source": "jeffs2696/AnalyticalDuctModes",
"score": 3
}
|
#### File: AnalyticalDuctModes/sample/helpers.py
```python
import pychebfun
import numpy as np
from scipy import special as sp
def get_answer():
"""Get an answer."""
return True
def kradial(m,a,b):
""" Compute the bessel functions as well as the zero crossings
Inputs
------
m : int
radial mode number
a : float
starting point
b : float
ending point
Outputs
-------
        roots : complex ndarray
            zero crossings of F (the radial wavenumbers), found with the
            roots() method of the Chebfun class (see the pychebfun docs)
        re_roots, im_roots : ndarray
            real and imaginary parts of the roots
        F : callable
            the annular-duct characteristic function
            Jp(m,k*a)*Yp(m,k*b) - Jp(m,k*b)*Yp(m,k*a)
        f_cheb : Chebfun
            output of Chebfun.from_function (see "BattlesTrefethen.ipynb"
            in the pychebfun github page)
"""
# creating a chebfun
Jp = lambda m,x : 0.5*(sp.jv(m-1,x) - sp.jv(m+1,x))
Yp = lambda m,x : 0.5*(sp.yv(m-1,x) - sp.yv(m+1,x))
F = lambda k,m,a,b :Jp(m,k*a)*Yp(m,k*b)-Jp(m,k*b)*Yp(m,k*a)
f_cheb = pychebfun.Chebfun.from_function(lambda x: F(x, m, a, b), domain = (10,100))
re_roots = f_cheb.roots().real
im_roots = f_cheb.roots().imag
roots = re_roots + im_roots*1j
print(roots)
return roots, re_roots, im_roots, F, f_cheb
def k_axial(M, krad,k_wave):
freq = 726.6
omega = 2*np.pi*freq # angular frequency
c0 = 343.15 # speed of sound
# rho0 = 1.225 # density
k_wave = omega/c0 # wave number
    beta = 1-M**2
    # axial wavenumber for uniform mean flow: (-M*k + sqrt(k^2 - (1-M^2)*krad^2)) / (1-M^2)
    kaxial = (-M*k_wave + np.sqrt(k_wave**2 - beta*krad**2)) / beta
print(k_wave,kaxial)
return kaxial
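# A minimal usage sketch (mode number, duct radii and Mach number below are
# illustrative, not values taken from this module):
#
#   roots, re_roots, im_roots, F, f_cheb = kradial(2, 0.3, 1.0)
#   k_wave = 2 * np.pi * 726.6 / 343.15  # same frequency/speed of sound as k_axial
#   kx = k_axial(0.3, roots[0], k_wave)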
```
|
{
"source": "JeffSaa/ASW",
"score": 3
}
|
#### File: JeffSaa/ASW/FR-Code-rev7.py
```python
import pandas as pd
import numpy
import os
from models import provider as provider_model
from models import purchase_process as pp_model
from models import quotation as quotation_model
def Price(pp,Quotations,Scoring):
dic={'ordered_list':[],'id_provider':[],'Purchase_P':[]}
data = pd.DataFrame(columns=('ordered_list', 'id_provider', 'Purchase_P'))
for x in range(0,len(Quotations)):
if pp == Quotations[x].Purchase_Process:
dic['ordered_list'].append(Quotations[x].Unit_Price)
dic['id_provider'].append(Quotations[x].Provider_Code)
dic['Purchase_P'].append(Quotations[x].Purchase_Process)
data= pd.DataFrame(dic, columns=dic.keys())
data = data.sort_values('ordered_list')
pos=0
for x in range(0,data.shape[0]):
for y in range(0,len(Quotations)):
if Quotations[y].Provider_Code==data.iloc[x][1] and Quotations[y].Purchase_Process==data.iloc[x][2]:
if x==1:
if data.iloc[x][0]==data.iloc[x-1][0]:
pos=pos-1
Quotations[y].Total_Points=Quotations[y].Total_Points+Scoring[pos][2]
pos=pos+1
def Time(pp,Quotations,Scoring):
dic={'ordered_list':[],'id_provider':[],'Purchase_P':[]}
data = pd.DataFrame(columns=('ordered_list', 'id_provider', 'Purchase_P'))
for x in range(0,len(Quotations)):
if pp == Quotations[x].Purchase_Process:
dic['ordered_list'].append(Quotations[x].Delivery_Time)
dic['id_provider'].append(Quotations[x].Provider_Code)
dic['Purchase_P'].append(Quotations[x].Purchase_Process)
data= pd.DataFrame(dic, columns=dic.keys())
data = data.sort_values('ordered_list')
pos=0
for x in range(0,data.shape[0]):
for y in range(0,len(Quotations)):
if Quotations[y].Provider_Code==data.iloc[x][1] and Quotations[y].Purchase_Process==data.iloc[x][2]:
if x==1:
if data.iloc[x][0]==data.iloc[x-1][0]:
pos=pos-1
Quotations[y].Total_Points=Quotations[y].Total_Points+Scoring[pos][3]
pos=pos+1
def Quality_c(Providers,Quotations,Scoring):
#"""This scores if has or not quality certification. Quality, lcompany and Vkey functions do not depend on purchase
#process (they are or not independently), but price and time score must be compared with the rest of quotations from
#the purchase process"""
for x in range(0,len(Providers)):
if Providers[x].Quality_Cert==True:
for y in range(0,len(Quotations)):
if Quotations[y].Provider_Code==Providers[x].Provider_ID:
Quotations[y].Total_Points=Quotations[y].Total_Points+Scoring[0][4]
def Lcompany(Providers,Quotations,Scoring):
for x in range(0,len(Providers)):
if Providers[x].Local_Company==True:
for y in range(0,len(Quotations)):
if Quotations[y].Provider_Code==Providers[x].Provider_ID:
Quotations[y].Total_Points=Quotations[y].Total_Points+Scoring[0][5]
def Valid_Key(Providers,Quotations,Scoring):
for x in range(0,len(Providers)):
for y in range(0,len(Quotations)):
if Quotations[y].Provider_Code==Providers[x].Provider_ID:
Quotations[y].Total_Points=Quotations[y].Total_Points+Scoring[0][1]
def Position(pp,Quotations):
#"""Assign the position or Ranking (attribute from Quotation) of the Quotation"""
dic={'ordered_list':[],'id_provider':[],'Purchase_P':[]}
data = pd.DataFrame(columns=('ordered_list', 'id_provider', 'Purchase_P'))
for x in range(0,len(Quotations)):
if pp == Quotations[x].Purchase_Process:
dic['ordered_list'].append(Quotations[x].Total_Points)
dic['id_provider'].append(Quotations[x].Provider_Code)
dic['Purchase_P'].append(Quotations[x].Purchase_Process)
data= pd.DataFrame(dic, columns=dic.keys())
data = data.sort_values('ordered_list',ascending=False)
pos=1
for x in range(0,data.shape[0]):
for y in range(0,len(Quotations)):
if Quotations[y].Provider_Code==data.iloc[x][1] and Quotations[y].Purchase_Process==data.iloc[x][2]:
if x==1:
if data.iloc[x][0]==data.iloc[x-1][0]:
pos=pos-1
Quotations[y].Rank=pos
pos=pos+1
def Show(pp,Quotations):
#"""Show every quotations of the respective purchase process, but it sorts them first and then shows them """
dic={'ordered_list':[],'pos':[]}
data = pd.DataFrame(columns=('ordered_list', 'id_provider', 'Purchase_P'))
for x in range(0,len(Quotations)):
if pp == Quotations[x].Purchase_Process:
dic['ordered_list'].append(Quotations[x].Rank)
dic['pos'].append(x)
data= pd.DataFrame(dic, columns=dic.keys())
data = data.sort_values('ordered_list')
print("Process with code "+str(pp))
for x in range(0,data.shape[0]):
if x==6:
break
print(str(Quotations[data.iloc[x][1]].Provider_Code)+" with score of " +str(Quotations[data.iloc[x][1]].Total_Points)+" and position "+str(Quotations[data.iloc[x][1]].Rank))
if data.shape[0]==0:
print("There is no purchase process or no quotes")
def Allpp(Purchase_items):
for x in range(0,len(Purchase_items)):
print("Code "+str(Purchase_items[x].Purchase_Process)+" name "+str(Purchase_items[x].Product_Name))
def Generate(Purchase_items,Quotations,Providers):
#"""This is the function to show the menu for the user"""
print("")
print("1. Show purchase processes(code and name")
print("2. Create CSV file and exit")
print("3. Show purchase process(complete)")
print("4. Show Provider(individual)")
print("5. Show Purchase_Process(individual)")
print("6. Show Quotation(individual")
print("7. Exit and not save")
while True:
try:
print("")
print("insert a value from 1-7")
print("")
elec=str(input())
break
except TypeError:
print("")
print("invalid value")
if elec=="1":
print("")
print("You chose option 1")
print("")
Allpp(Purchase_items)
Generate(Purchase_items,Quotations,Providers)
elif elec=="2":
while True:
print("")
print("Type name for the document")
print("")
Name=input()
if Name=="":
print("")
print("type a valid name")
else:
break
File=Name+".csv"
csv=open(File,"w")
Title="Purchase_Process\n"
csv.write(Title)
for x in range(0,len(Purchase_items)):
pp=Purchase_items[x].Purchase_Process
Dic={'ordered_list':[],'pos':[]}
Data = pd.DataFrame(columns=('ordered_list', 'pos'))
for x in range(0,len(Quotations)):
if pp == Quotations[x].Purchase_Process:
Dic['ordered_list'].append(Quotations[x].Rank)
Dic['pos'].append(x)
Data= pd.DataFrame(Dic, columns=Dic.keys())
Data = Data.sort_values('ordered_list')
csv.write("Process with code"+str(pp)+"\n")
for x in range(0,Data.shape[0]):
csv.write(str(Quotations[Data.iloc[x][1]].Provider_Code)+","+" with score of "+","+str(Quotations[Data.iloc[x][1]].Total_Points)+","+" and position "+","+str(Quotations[Data.iloc[x][1]].Rank)+"\n")
csv.close()
print("File created")
print("File created in the following path:"+str(os.getcwd()))
elif elec=="3":
while True:
try:
print("")
print("Type purchase process")
print("")
proce=int(input())
break
except ValueError:
print("")
print("invalid value")
Show(proce,Quotations)
Generate(Purchase_items,Quotations,Providers)
elif elec=="4":
while True:
try:
print("")
print("Type provider code")
print("")
cod=int(input())
break
except ValueError:
print("")
print("invalid value")
ver=False
for x in range(0,len(Providers)):
if Providers[x].Provider_ID==cod:
Providers[x].Show_data()
ver=True
if ver==False:
print("Provider not found")
Generate(Purchase_items,Quotations,Providers)
elif elec=="5":
while True:
try:
print("")
print("Type Purchase_Process code")
print("")
cod=int(input())
break
except ValueError:
print("")
print("invalid value")
ver=False
for x in range(0,len(Purchase_items)):
if Purchase_items[x].Purchase_Process==cod:
Purchase_items[x].Show_data()
ver=True
if ver==False:
print("")
print("Purchase_Process not found")
print("")
Generate(Purchase_items,Quotations,Providers)
elif elec=="6":
print("Type Purchase_Process(quotation)")
while True:
try:
print("")
print("Type Purchase_Process(quotation)")
print("")
cod_pp=int(input())
break
except ValueError:
print("")
print("invalid value")
while True:
try:
print("")
print("Type provider(quotation")
print("")
cod_provider=int(input())
break
except ValueError:
print("")
print("invalid value")
ver=False
for x in range(0,len(Quotations)):
if Quotations[x].Purchase_Process==cod_pp and Quotations[x].Provider_Code==cod_provider:
Quotations[x].Show_data()
ver=True
if ver==False:
print("Quotation not found")
Generate(Purchase_items,Quotations,Providers)
elif elec=="7":
print("You are leaving")
else:
print("You have selected an incorrect option")
Generate(Purchase_items,Quotations,Providers)
def Execute(Providers,Purchase_Items,Quotations,Scoring):
#"""This executes the code
#Quality, Lcompany y Valid_key do not depend on purchase process (they are or not independently), but price and time
#score must be compared with the rest of quotations from the purchase process"""
Quality_c(Providers,Quotations,Scoring)
Lcompany(Providers,Quotations,Scoring)
Valid_Key(Providers,Quotations,Scoring)
for x in range(0,len(Purchase_Items)):
Price(Purchase_Items[x].Purchase_Process,Quotations,Scoring)
Time(Purchase_Items[x].Purchase_Process,Quotations,Scoring)
Position(Purchase_Items[x].Purchase_Process,Quotations)
#show(Purchase_Items[x].Purchase_Process,Quotations)
#"""These are the Sequential Steps to get information (tables) and start executing analysis"""
#"""I create 3 lists which contain providers, purchase processes and quotations, these lists have the same name
#as classes but in plural. They are initialized as empty lists, for storing information related to providers,
#purchase_items and quotations"""
Providers=[]
Purchase_Items=[]
Quotations=[]
#"""STEP1: We enter the file names and then they get converted to numpy objects. We specify the path, it is read as pandas
#and then converted to numpy"""
File=input("Step1/4: Hi! please specify the path for uploading the Providers file: ")
while True:
try:
File=pd.read_csv(File,header=0)
break
except FileNotFoundError:
print("")
File=input("File not found, please try again")
File=File.to_numpy()
for x in range(0,File.shape[0]):
Providers.append(provider_model.Provider(File[x][0],File[x][1],File[x][2],File[x][3],File[x][4]))
print("")
print("Great! "+str(File.shape[0])+" registers of providers were created")
#"""STEP 2: Specify the path and read SCORING TABLE file into pandas and then numpy. The data is stored in a numpy array"""
print("")
Scoring=input("Step2/4: Ok, now please specify the path for uploading the Scoring Policy file: ")
print("")
while True:
try:
Scoring = pd.read_csv(Scoring, header=0)
break
except FileNotFoundError:
print("")
Scoring = input("File not found, please try again")
Scoring=Scoring.to_numpy()
print("Great!"+str(Scoring.shape[0])+" Scoring Policies were created")
#"""STEP 3: Specify the path and read PURCHASE_PROCESS file into pandas and then numpy"""
print("")
Purchase_Request=input("Step3/4: Ok, now please specify the path for uploading the Purchase Processes file: ")
print("")
while True:
try:
Purchase_Request = pd.read_csv(Purchase_Request, header=0)
break
except FileNotFoundError:
print("")
Purchase_Request = input("File not found, please try again")
Purchase_Request=Purchase_Request.to_numpy()
for x in range(0,Purchase_Request.shape[0]):
Purchase_Items.append(pp_model.Purchase_Process(Purchase_Request[x][0],Purchase_Request[x][1],Purchase_Request[x][2]))
print("Great! "+str(Purchase_Request.shape[0])+" purchase processes were created")
#"""STEP 4: Specify the path and read Quotations file into pandas and then numpy"""
print("")
Quotation_file=input("Step4/4: Finally, please specify the path for uploading the Quotations file: ")
print("")
while True:
try:
Quotation_file = pd.read_csv(Quotation_file, header=0)
break
except FileNotFoundError:
print("")
Quotation_file = input("File not found, please try again")
Quotation_file=Quotation_file.to_numpy()
for x in range(0,Quotation_file.shape[0]):
Quotations.append(quotation_model.Quotation(Quotation_file[x][0],Quotation_file[x][1],Quotation_file[x][2],Quotation_file[x][3],Quotation_file[x][4],Quotation_file[x][5]))
print("Great! "+str(Quotation_file.shape[0])+" quotations were uploaded to be scored")
print("")
#"""We assign to each quotation its respective score obtained from SCORING, and the pp parameter is the purchase process.
#This is the scoring process by price, delivery_time, local_company, Quality certification and if it is a registered
#provider."""
Execute(Providers,Purchase_Items,Quotations,Scoring)
Generate(Purchase_Items,Quotations,Providers)
```
#### File: ASW/models/purchase_process.py
```python
class Purchase_Process:
#"""Purchase_Process class will describe the objects of items needed to buy for
#the company (purchase process), that will be read from a .csv file with
#information as ProcessID, ProdName, Qty, DelTime, Loc."""
def __init__(self,Purchase_Process,Product_Name,Quantity_Required):
self.Purchase_Process=Purchase_Process
self.Product_Name=Product_Name
self.Quantity_Required=Quantity_Required
#self.Delivery_Time_Required
#self.Location_Required
def Show_data(self):
print("Purchase_Process "+str(self.Purchase_Process))
print("Product_Name "+str(self.Product_Name))
print("Quantity_Required "+str(self.Quantity_Required))
#print(self.Delivery_Time_Required)
#print(self.Location_Required)
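# Illustrative usage sketch (not part of the original model): the values below are hypothetical
# and only show how one row of the purchase-processes CSV becomes a Purchase_Process record.
if __name__ == "__main__":
    example = Purchase_Process(101, "Office chairs", 25)
    example.Show_data()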
```
|
{
"source": "jeffsawatzky/python-jsonapi",
"score": 3
}
|
#### File: core/types/links.py
```python
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from uri import URI
from python_jsonapi.core.types import Mixable
from python_jsonapi.core.types import Mixin
from python_jsonapi.core.types.meta import MetaMixin
class Link(Mixable, MetaMixin):
"""Class to represent a JSON:API link object.
Attributes:
href (URI): The uri for the link
rel (List[str], optional): The link relation. Defaults to None.
"""
def __init__(
self, *, href: URI, rel: Optional[List[str]] = None, **kwargs: Any
) -> None:
"""Initializes a link.
Args:
href: The uri for the link
rel: The link relation. Defaults to None.
kwargs: Extra kwargs to pass along.
"""
self.href = href
self.rel = rel
super().__init__(**kwargs)
class LinksMixin(Mixin):
"""Mixin to add to all types that support a JSON:API links object.
Attributes:
links (Dict[str, Link], optional):
The link object.
"""
def __init__(
self, *, links: Optional[Dict[str, Link]] = None, **kwargs: Any
) -> None:
"""Initializes the links mixin.
Args:
links: The links object. Defaults to None.
kwargs: Extra kwargs to pass along.
"""
self.links = links
super().__init__(**kwargs)
def add_link(self, *, key: str, link: Link) -> None:
"""Adds a new link to the links object.
Args:
key: They key to use in the links object dictionary.
link: The link to add.
"""
if not self.links:
self.links = {}
self.links[key] = link
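# Illustrative usage sketch (not part of the original module): the URL is hypothetical and only
# shows how a Link is attached to a links object through LinksMixin.add_link.
if __name__ == "__main__":
    holder = LinksMixin()
    holder.add_link(key="self", link=Link(href=URI("https://example.com/articles/1")))
    print(holder.links["self"].href)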
```
#### File: core/types/resource.py
```python
from typing import Any
from typing import Optional
from typing import TypedDict
from python_jsonapi.core.types import Mixable
from python_jsonapi.core.types.links import LinksMixin
from python_jsonapi.core.types.meta import MetaMixin
from python_jsonapi.core.types.relationships import RelationshipsMixin
from python_jsonapi.core.types.resource_identifier import ResourceIdentifiableMixin
class Resource(
Mixable, ResourceIdentifiableMixin, LinksMixin, RelationshipsMixin, MetaMixin
):
"""A class for a JSON:API resource object.
Attributes:
attributes (Attributes, optional):
The attributes for the resource. Defaults to None.
"""
class Attributes(TypedDict):
"""Class to represent a JSON:API resource attributes object entry."""
pass
def __init__(self, attributes: Optional[Attributes] = None, **kwargs: Any) -> None:
"""Initializes the resource identifer object.
Args:
attributes: The attributes for the resource. Defaults to None.
kwargs: Extra kwargs to pass along.
"""
self.attributes = attributes
super().__init__(**kwargs)
```
#### File: core/types/test_relationships.py
```python
from python_jsonapi.core.types.relationships import Relationship
from python_jsonapi.core.types.relationships import RelationshipsMixin
def test_relationship_init() -> None:
"""Can init a new relationships."""
sut = Relationship()
assert sut is not None
def test_mixin_init() -> None:
"""Can init a new mixin."""
sut = RelationshipsMixin()
assert sut is not None
relationship = Relationship()
sut = RelationshipsMixin(relationships={"self": relationship})
assert sut is not None
assert sut.relationships is not None
assert sut.relationships["self"] == relationship
def test_mixin_add_relationship() -> None:
"""Can add a new entry."""
sut = RelationshipsMixin()
sut.add_relationship(key="relationship1", relationship=Relationship())
sut.add_relationship(key="relationship2", relationship=Relationship())
assert sut.relationships is not None
assert sut.relationships["relationship1"] is not None
assert sut.relationships["relationship2"] is not None
```
|
{
"source": "jeffschriber/QCEngine",
"score": 2
}
|
#### File: qcengine/programs/qcore.py
```python
from typing import Any, Dict, List, Set, TYPE_CHECKING
import numpy as np
from qcelemental.models import AtomicResult, BasisSet
from qcelemental.util import parse_version, safe_version, which_import
from ..exceptions import InputError, UnknownError
from .model import ProgramHarness
from .util import (
cca_ao_order_spherical,
get_ao_conversion,
reorder_column_ao_indices,
reorder_row_and_column_ao_indices,
)
if TYPE_CHECKING:
from qcelemental.models import AtomicInput
from ..config import TaskConfig
def qcore_ao_order_spherical(max_angular_momentum: int) -> Dict[int, List[int]]:
ao_order = {}
for ang_mom in range(max_angular_momentum):
ao_order[ang_mom] = [x for x in range(ang_mom, -1 * ang_mom - 1, -1)]
return ao_order
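# For illustration (not part of the original harness): qcore_ao_order_spherical(3) returns
# {0: [0], 1: [1, 0, -1], 2: [2, 1, 0, -1, -2]}, i.e. each shell's m components listed in
# reverse of the CCA ordering, which is what get_ao_conversion() consumes in the class below.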
class QcoreHarness(ProgramHarness):
_defaults: Dict[str, Any] = {
"name": "Qcore",
"scratch": True,
"thread_safe": False,
"thread_parallel": True,
"node_parallel": False,
"managed_memory": True,
}
version_cache: Dict[str, str] = {}
# List of DFT functionals
_dft_functionals: Set[str] = {
"SLATER",
"DIRAC",
"SLATERD3",
"DIRACD3",
"VWN5",
"VWN",
"VWN1",
"SVWN",
"LDA",
"BLYP",
"BPW91",
"BLYPD3",
"B88",
"PBEX",
"PBERX",
"PBEC",
"LYP",
"PW91",
"P86",
"PBE",
"PBER",
"PBED3",
"PBERD3",
"B3LYP3",
"B3LYP",
"B3LYP5",
"PBE0",
"PBE1PBE",
"B3LYP3D3",
"B3LYPD3",
"B3LYP5D3",
"PBE0D3",
"PBE1PBED3",
"CAMB3LYP",
"WB97X",
"CAMB3LYPD3",
"WB97XD3",
}
_xtb_models: Set[str] = {"GFN1", "GFN0"}
# This map order converts qcore ordering to CCA ordering
# Entos spherical basis ordering for each angular momentum. Follows reverse order of CCA.
_qcore_to_cca_ao_order = {"spherical": get_ao_conversion(cca_ao_order_spherical(10), qcore_ao_order_spherical(10))}
class Config(ProgramHarness.Config):
pass
def found(self, raise_error: bool = False) -> bool:
return which_import(
"qcore",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via `conda install py-qcore -c entos -c conda-forge`.",
)
def get_version(self) -> str:
self.found(raise_error=True)
which_prog = which_import("qcore")
if which_prog not in self.version_cache:
import qcore
self.version_cache[which_prog] = safe_version(qcore.__version__)
return self.version_cache[which_prog]
def compute(self, input_data: "AtomicInput", config: "TaskConfig") -> "AtomicResult":
"""
Run qcore
"""
# Check if qcore executable is found
self.found(raise_error=True)
# Check qcore version
if parse_version(self.get_version()) < parse_version("0.8.9"):
raise TypeError(f"qcore version {self.get_version()} not supported")
import qcore
method = input_data.model.method.upper()
if method in self._dft_functionals:
method = {"kind": "dft", "xc": method, "ao": input_data.model.basis}
elif method == "HF":
method = {"kind": "hf", "ao": input_data.model.basis}
elif method in self._xtb_models:
method = {"kind": "xtb", "model": method}
else:
raise InputError(f"Method is not valid: {method}")
method["details"] = input_data.keywords
qcore_input = {
# "schema_name": "single_input",
"molecule": {
"geometry": input_data.molecule.geometry,
"atomic_numbers": input_data.molecule.atomic_numbers,
"charge": input_data.molecule.molecular_charge,
"multiplicity": input_data.molecule.molecular_multiplicity,
},
"method": method,
"result_contract": {"wavefunction": "all"},
"result_type": input_data.driver,
}
try:
result = qcore.run(qcore_input, ncores=config.ncores)
except Exception as exc:
raise UnknownError(str(exc))
return self.parse_output(result.dict(), input_data)
def parse_output(self, output: Dict[str, Any], input_model: "AtomicInput") -> "AtomicResult":
wavefunction_map = {
"orbitals_alpha": "scf_orbitals_a",
"orbitals_beta": "scf_orbitals_b",
"density_alpha": "scf_density_a",
"density_beta": "scf_density_b",
"fock_alpha": "scf_fock_a",
"fock_beta": "scf_fock_b",
"eigenvalues_alpha": "scf_eigenvalues_a",
"eigenvalues_beta": "scf_eigenvalues_b",
"occupations_alpha": "scf_occupations_a",
"occupations_beta": "scf_occupations_b",
}
output_data = input_model.dict()
output_data["return_result"] = output[input_model.driver.value]
# Always build a wavefunction, it will be stripped
obas = output["wavefunction"]["ao_basis"]
for k, center in obas["center_data"].items():
# Convert basis set, cannot handle arrays
for shell in center["electron_shells"]:
shell.pop("normalized_primitives", None)
for el_k in ["coefficients", "exponents", "angular_momentum"]:
shell[el_k] = shell[el_k].tolist()
if center["ecp_potentials"] is not None:
for shell in center["ecp_potentials"]:
shell.pop("ecp_potentials", None)
for ecp_k in ["angular_momentum", "r_exponents", "gaussian_exponents", "coefficients"]:
shell[ecp_k] = shell[ecp_k].tolist()
basis_set = BasisSet(
name=str(input_model.model.basis), center_data=obas["center_data"], atom_map=obas["atom_map"]
)
wavefunction = {"basis": basis_set}
for key, qcschema_key in wavefunction_map.items():
qcore_data = output["wavefunction"].get(key, None)
if qcore_data is None:
continue
if ("density" in key) or ("fock" in key):
qcore_data = reorder_row_and_column_ao_indices(qcore_data, basis_set, self._qcore_to_cca_ao_order)
# Handles orbitals and 1D
elif "orbitals" in key:
qcore_data = reorder_column_ao_indices(qcore_data, basis_set, self._qcore_to_cca_ao_order)
elif "eigenvalues" in key:
qcore_data = reorder_column_ao_indices(
qcore_data.reshape(1, -1), basis_set, self._qcore_to_cca_ao_order
).ravel()
elif "occupations" in key:
tmp = np.zeros(basis_set.nbf)
tmp[: qcore_data.shape[0]] = qcore_data
qcore_data = reorder_column_ao_indices(
tmp.reshape(1, -1), basis_set, self._qcore_to_cca_ao_order
).ravel()
else:
raise KeyError("Wavefunction conversion key not understood")
wavefunction[qcschema_key] = qcore_data
wavefunction["restricted"] = True
if "scf_eigenvalues_b" in wavefunction:
wavefunction["restricted"] = False
output_data["wavefunction"] = wavefunction
# Handle remaining top level keys
properties = {
"calcinfo_nbasis": basis_set.nbf,
"calcinfo_nmo": basis_set.nbf,
"calcinfo_nalpha": np.sum(wavefunction["scf_occupations_a"] > 0),
"calcinfo_natom": input_model.molecule.symbols.shape[0],
"return_energy": output["energy"],
}
if wavefunction["restricted"]:
properties["calcinfo_nbeta"] = properties["calcinfo_nalpha"]
else:
properties["calcinfo_nbeta"] = np.sum(wavefunction["scf_occupations_b"] > 0)
output_data["properties"] = properties
output_data["schema_name"] = "qcschema_output"
output_data["success"] = True
return AtomicResult(**output_data)
class EntosHarness(QcoreHarness):
_defaults: Dict[str, Any] = {
"name": "Entos",
"scratch": True,
"thread_safe": False,
"thread_parallel": True,
"node_parallel": False,
"managed_memory": True,
}
```
|
{
"source": "jeffschulthies/netbox-servicenow",
"score": 2
}
|
#### File: service_now_cmdb/models/cmdb.py
```python
import json
import requests
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
from requests import TooManyRedirects, HTTPError, ConnectionError, Timeout
from config import settings
class CMDBObjectType(models.Model):
"""
The type of object you want to model from ServiceNow.
"""
name = models.CharField(max_length=255, unique=False, blank=False)
endpoint = models.CharField(max_length=255, unique=False, blank=False)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
def __str__(self):
return "{}:{}".format(self.id, self.name)
class CMDBObjectField(models.Model):
"""
The fields that object contains in ServiceNow.
"""
name = models.CharField(max_length=255, unique=False, blank=False)
type = models.ForeignKey('CMDBObjectType', on_delete=models.CASCADE, blank=False)
order = models.PositiveIntegerField(blank=True)
def __str__(self):
return "{}:{}:{}".format(self.id, self.name, self.type)
def clean(self):
if CMDBObjectField.objects.filter(name=self.name, type=self.type).exists():
raise ValidationError("There already exists a field '{}' associated with this object type '{}'.".format(self.name, self.type.name))
def save(self, *args, **kwargs):
self.clean()
super(CMDBObjectField, self).save(*args, **kwargs)
class CMDBObject(models.Model):
"""
The object you want to model 1:1 to your ServiceNow CMDB object.
"""
type = models.ForeignKey('CMDBObjectType', on_delete=models.CASCADE, blank=False)
service_now_id = models.CharField(max_length=255)
object_id = models.PositiveIntegerField()
def __str__(self):
return "{}:{}:{}".format(self.id, self.type.name, self.service_now_id)
def save(self, *args, **kwargs):
super(CMDBObject, self).save(*args, **kwargs)
@property
def fields(self):
"""
:return: QuerySet
"""
values = CMDBObjectValue.objects.filter(object=self).values_list('field_id')
field_names = CMDBObjectField.objects.filter(pk__in=values).values_list('name', flat=True)
return field_names
@property
def key_value(self):
"""
:return: Dictionary
"""
values = CMDBObjectValue.objects.filter(object=self).values()
d = dict()
for i in values:
object_field = CMDBObjectField.objects.get(id=i['field_id'])
field_name = object_field.name
d[field_name] = i['value']
return d
def post(self, access_token):
"""
:param access_token:
:return:
"""
service_now_headers = {
'Authorization': 'Bearer {}'.format(access_token),
'Content-Type': "application/json"
}
try:
r = requests.post(
url="https://{}.service-now.com/api/now/table/{}".format(settings.SERVICE_NOW_DOMAIN, self.type.endpoint),
headers=service_now_headers,
data=json.dumps(self.key_value)
)
except (ConnectionError, Timeout, HTTPError, TooManyRedirects) as e:
raise ValueError("Invalid Endpoint. Error: {}".format(e))
if r.status_code == 401:
raise ValueError("Bad Access Token")
if r.status_code != 201:
# Invalid Input
return False
resp = json.loads(r.text)
self.service_now_id = resp['result']['sys_id']
return True
def put(self, access_token):
"""
:param access_token:
:return:
"""
if not self.service_now_id:
raise ValueError("There is no ServiceNow ID associated with this object. Try creating the object first.")
service_now_headers = {
'Authorization': 'Bearer {}'.format(access_token),
'Content-Type': "application/json"
}
try:
r = requests.put(
url="https://{}.service-now.com/api/now/table/{}/{}".format(settings.SERVICE_NOW_DOMAIN, self.type.endpoint, str(self.service_now_id)),
headers=service_now_headers,
data=json.dumps(self.key_value)
)
except (ConnectionError, Timeout, HTTPError, TooManyRedirects) as e:
raise ValueError("Invalid Endpoint. Error: {}".format(e))
if r.status_code == 401:
raise ValueError("Bad Access Token")
if r.status_code != 200:
return False
resp = json.loads(r.text)
self.service_now_id = resp['result']['sys_id']
return True
def get(self, access_token):
"""
:param access_token:
:return:
"""
if not self.service_now_id:
raise ValueError("There is no ServiceNow ID associated with this object. Try creating the object first.")
service_now_headers = {
'Authorization': 'Bearer {}'.format(access_token),
'Content-Type': "application/json"
}
try:
r = requests.get(
url="https://{}.service-now.com/api/now/table/{}/{}".format(settings.SERVICE_NOW_DOMAIN, self.type.endpoint, str(self.service_now_id)),
headers=service_now_headers
)
except (ConnectionError, Timeout, HTTPError, TooManyRedirects) as e:
raise ValueError("Invalid Endpoint. Error: {}".format(e))
if r.status_code == 401:
raise ValueError("Bad Access Token")
if r.status_code != 200:
return False
return r.text
def get_field(self, name):
"""
:param name:
:return:
"""
values = CMDBObjectValue.objects.filter(object=self)
for i in values:
if i.field.name == name:
return i
return None
def set_field(self, name, value):
"""
:param name: field name
:param value: value to store for that field
:return:
"""
# Look up the CMDBObjectField for this object's type by name, then store the value.
field = CMDBObjectField.objects.get(name=name, type=self.type)
CMDBObjectValue.objects.create(object=self, field=field, value=value)
class CMDBObjectValue(models.Model):
"""
The values of the object. The values are limited to the fields of the object type.
"""
object = models.ForeignKey('CMDBObject', on_delete=models.CASCADE, blank=False)
field = models.ForeignKey('CMDBObjectField', on_delete=models.CASCADE, blank=False)
value = models.CharField(max_length=255, unique=False)
def __str__(self):
return "{}:{}:{}".format(self.object.id, self.field, self.value)
def save(self, *args, **kwargs):
super(CMDBObjectValue, self).save(*args, **kwargs)
@property
def object_field(self):
return CMDBObjectField.objects.get(id=self.field)
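# Illustrative usage sketch (not part of the original module): the content type, endpoint,
# field name and token below are hypothetical; they only show how the four models fit together
# to mirror one ServiceNow CMDB record.
#
#   server_type = CMDBObjectType.objects.create(
#       name="Server", endpoint="cmdb_ci_server",
#       content_type=ContentType.objects.get(app_label="dcim", model="device"))
#   name_field = CMDBObjectField.objects.create(name="name", type=server_type, order=1)
#   obj = CMDBObject.objects.create(type=server_type, service_now_id="", object_id=1)
#   obj.set_field(name="name", value="web-01")
#   obj.post(access_token="<oauth token>")   # creates the record in ServiceNow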
```
|
{
"source": "jeffscottlevine/mpc",
"score": 2
}
|
#### File: mpc/mpc/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
# def home(request):
# return HttpResponse("Hello, you are at the default home page.")
def home(request):
return render(request, 'polls/home.html')
```
|
{
"source": "jeffseif/cakebot",
"score": 2
}
|
#### File: cakebot/cakebot/logging.py
```python
import datetime
import logging
from cakebot import __program__
def setup(verbose):
FORMAT = '%(asctime)s:%(levelname)s:{program}:%(process)d %(message)s'.format(program=__program__)
if verbose == 0:
logging.basicConfig(format=FORMAT, level=logging.WARNING)
elif verbose == 1:
logging.basicConfig(format=FORMAT, level=logging.INFO)
elif verbose > 1:
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
def warning(message):
logging.getLogger(__name__).warning(message)
def info(message):
logging.getLogger(__name__).info(message)
def debug(message):
logging.getLogger(__name__).debug(message)
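# Illustrative usage sketch (not part of the original module): verbose=2 maps to DEBUG,
# mirroring a -vv command-line flag; the messages are hypothetical.
if __name__ == "__main__":
    setup(verbose=2)
    info("cakebot logging configured")
    debug("this only appears because verbose > 1")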
```
#### File: cakebot/cakebot/mods.py
```python
import cakebot.logging
from cakebot import KILL_SWITCH
from cakebot.bind import bind
ZERO_WIDTH_SPACE = '\u200b'
class DeathException(Exception):
pass
@bind('hear', '^{kill}$'.format(kill=KILL_SWITCH))
def die(self, conn, event, message, match):
warning = '[{nickname}] Aaaargh -- {reply} -- my only weakness!'.format(
nickname=conn.get_nickname(),
reply=match.group(0),
)
cakebot.logging.warning(warning)
raise DeathException(warning)
@bind('reply', '^echo (.*)')
def echo(self, conn, event, message, match):
reply = match.group(1)
self.send(conn, event, reply)
def pingless_nick(nick):
return nick[0] + ZERO_WIDTH_SPACE + nick[1:]
def forward(self, conn, event, message, match):
if event.target in self.listens:
prefix = '.'.join((
event.target,
pingless_nick(event.source.nick),
))
message = '{prefix}: `{message}`'.format(prefix=prefix, message=message)
for channel in self.forwards:
event.target = channel
self.send(conn, event, message)
```
#### File: cakebot/cakebot/swarm.py
```python
import concurrent.futures
import irc.bot
import irc.connection
import ssl
import cakebot.bot
import cakebot.config
import cakebot.logging
class Swarm:
bots = []
def __init__(self, config_path):
self.config = cakebot.config.Swarm.from_json_path(config_path)
server_kwargs = {
'server_list': [self.config.server],
}
if self.config.ssl:
server_kwargs['connect_factory'] = irc.connection.Factory(wrapper=ssl.wrap_socket)
for bot_config in self.config.bots:
kwargs = bot_config.to_dict()
kwargs.update(server_kwargs)
self.bots.append(cakebot.bot.Bot.from_dict(kwargs))
def start(self):
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.bots)) as executor:
futures = executor.map(lambda bot: bot.start(), self.bots)
for future in concurrent.futures.as_completed(futures):
if future.exception() is not None:
exit(255)
```
|
{
"source": "jeffseif/colors",
"score": 3
}
|
#### File: colors/colors/colorizer.py
```python
def Colorizer(color, weight=1):
"""Function for bash-style color formatting."""
def inner(value):
return template.format(value)
template = '\033[{:d};{:d}m{{:s}}\033[0m'.format(weight, color)
return inner
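# Illustrative usage sketch (not part of the original module): 31 is the standard ANSI code
# for red; the returned function wraps its argument in the matching escape sequences.
if __name__ == "__main__":
    red = Colorizer(31)
    print(red("error"))  # emits "\033[1;31merror\033[0m", i.e. bold red "error"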
```
|
{
"source": "jeffseif/lunations",
"score": 3
}
|
#### File: lunations/lunations/__main__.py
```python
import argparse
import datetime
from lunations import forecaster
from lunations import modeler
DEFAULT_HARMONIC_PEAKS = 2
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# Model
model_parser = subparsers.add_parser('model', help='Model lunations')
model_parser.add_argument('--path-to-csv-input', required=True, type=str)
model_parser.add_argument('--harmonic-peaks', default=DEFAULT_HARMONIC_PEAKS, required=False, type=int)
model_parser.add_argument('--path-to-json-output', required=True, type=str)
model_parser.set_defaults(func=modeler.pipeline)
# Forecast
forecast_parser = subparsers.add_parser('forecast', help='Lookup lunations')
forecast_parser.add_argument('--forecast-epoch-timestamp', default=datetime.datetime.now().timestamp(), type=float)
forecast_parser.set_defaults(func=forecaster.cli)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
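# Illustrative CLI usage (not part of the original module); the file paths and timestamp are
# hypothetical and only show the two sub-commands wired up above:
#   python -m lunations model --path-to-csv-input lunations.csv --path-to-json-output model.json
#   python -m lunations forecast --forecast-epoch-timestamp 1700000000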
```
#### File: lunations/lunations/validator.py
```python
import datetime
import numpy
def dt64_to_str(dt64):
if isinstance(dt64, numpy.datetime64):
dt64 = dt64.astype(float) / 1e9
return str(
datetime
.datetime
.utcfromtimestamp(dt64)
)
def _validate_iter(model, dt, x, y, name):
residual = y.copy() - numpy.array(list(map(model.predict, x)))
yield f'> Validation report: {name:s}'
yield ''
yield f'- Evaluating {len(residual):d} lunations'
yield f'- Between {dt64_to_str(dt.min()):s} and {dt64_to_str(dt.max()):s}'
yield f'- MAE: {numpy.abs(residual / 3600).mean():.1f}h'
yield f'- bias: {residual.mean():+.0f}s'
yield ''
def report(*args, **kwargs):
print('\n'.join(_validate_iter(*args, **kwargs)))
def _sample_iter(model, dt, x, y, index=120):
yield f'> Sampling lunation {x[index]:d}'
yield ''
yield f'- Predicted: {dt64_to_str(model.predict(x[index])):s}'
yield f'- Observed : {dt64_to_str(y[index]):s}'
yield ''
def sample(*args, **kwargs):
print('\n'.join(_sample_iter(*args, **kwargs)))
```
|
{
"source": "jeffseif/sudoku",
"score": 3
}
|
#### File: sudoku/tests/test_bit_board.py
```python
import random
import pytest
from sudoku.bitBoard import BitBoard
class TestBitBoard:
@pytest.fixture
def size(self):
return 9
@pytest.fixture
def iterator(self, size):
return range(size)
@pytest.fixture
def bit_false(self, iterator):
return [[[False for each in iterator] for each in iterator] for each in iterator]
@pytest.fixture
def bit_true(self, iterator):
return [[[True for each in iterator] for each in iterator] for each in iterator]
@pytest.fixture
def bit_random(self, iterator):
return [[[random.choice((True, False)) for each in iterator] for each in iterator] for each in iterator]
def test_bit_board_init(self, size, bit_true):
bitBoard = BitBoard(size)
assert bitBoard.bits == bit_true
def test_bit_board_init_false(self, size, bit_false):
bitBoard = BitBoard(size, bit_false)
assert bitBoard.bits is bit_false
def test_bit_board_len(self, size, bit_false):
bitBoard = BitBoard(size, bit_false)
assert len(bitBoard) == 0
def test_bit_board_zero_direct(self, size, bit_random):
bitBoard = BitBoard(size, bit_random)
ijk = next(iter(bitBoard.trues()))
assert bitBoard[ijk] is True
bitBoard.zeroDirect(ijk)
assert bitBoard[ijk] is True
index, jndex, kndex = ijk
assert sum(bitBoard[:, jndex, kndex]) == 1
assert sum(bitBoard[index, :, kndex]) == 1
assert sum(bitBoard[index, jndex, :]) == 1
def test_bit_board_zero_indirect(self, size, bit_random):
bitBoard = BitBoard(size, bit_random)
ijk = next(iter(bitBoard.trues()))
assert bitBoard[ijk] is True
bitBoard.zeroIndirect(ijk)
assert bitBoard[ijk] is False
```
|
{
"source": "jeffseif/switch",
"score": 2
}
|
#### File: switch/switch/main.py
```python
from switch import __author__
from switch import __description__
from switch import __version__
from switch import __year__
from switch.amazon import amazon
from switch.ifttt import maybe_load_configs
from switch.ifttt import end_to_end
from switch.logger import set_logging_verbosity
from switch.walmart import walmart
from switch.target import target
ZIPCODE = 94703
def main():
import argparse
__version__author__year__ = '{} | {} {}'.format(
__version__,
__author__,
__year__,
)
parser = argparse.ArgumentParser(
description=__description__,
epilog='Version {}'.format(__version__author__year__),
)
parser.add_argument(
'--version',
action='version',
version='%(prog)s {}'.format(__version__author__year__),
)
subparsers = parser.add_subparsers()
# Parent
parent = argparse.ArgumentParser(add_help=False)
parent.add_argument(
'-b',
'--beyond-console',
action='store_true',
default=False,
help='Check products beyond just the console',
)
parent.add_argument(
'-c',
'--config-path',
default=None,
help='Path to IFTTT config json file',
)
parent.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='Increase output verbosity',
)
parent.add_argument(
'-z',
'--zipcode',
default=ZIPCODE,
type=int,
help='Zipcode for location searching (e.g., %(default)s)',
)
parents = (parent,)
# All
all_parser = subparsers.add_parser(
'all',
parents=parents,
help='Check all',
)
all_parser.set_defaults(funcs=(amazon, target, walmart))
# Amazon
amazon_parser = subparsers.add_parser(
'amazon',
parents=parents,
help='Check amazon',
)
amazon_parser.set_defaults(func=amazon)
# IFTTT test
ifttt_parser = subparsers.add_parser(
'ifttt',
parents=parents,
help='Test ifttt setup',
)
ifttt_parser.set_defaults(func=end_to_end)
# Target
target_parser = subparsers.add_parser(
'target',
parents=parents,
help='Check target',
)
target_parser.set_defaults(func=target)
# Walmart
walmart_parser = subparsers.add_parser(
'walmart',
parents=parents,
help='Check walmart',
)
walmart_parser.set_defaults(func=walmart)
args = parser.parse_args()
if hasattr(args, 'verbose'):
set_logging_verbosity(args.verbose)
if hasattr(args, 'config_path'):
maybe_load_configs(args)
if hasattr(args, 'func'):
args.func(args)
if hasattr(args, 'funcs'):
for func in args.funcs:
func(args)
if __name__ == '__main__':
import sys
sys.exit(main())
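# Illustrative CLI usage (not part of the original module); the flag values are examples only:
#   python -m switch.main all -v -z 94703
#   python -m switch.main amazon --beyond-console --config-path ifttt.json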
```
#### File: switch/switch/web_session.py
```python
from colors import BLUE
from colors import GREEN
from colors import RED
from colors import YELLOW
from colors import WHITE
from switch.ifttt import IFTTT
from switch.logger import Logger
FAILURE = RED('✗')
SUCCESS = GREEN('✓')
class WebSession(Logger):
def check_for_zipcode(self, args):
if not args.beyond_console and not self.is_console:
return
response = self.run_session_for_zipcode(args.zipcode, self.prompt)
results = list(self.check_response_for_product(response, self.product_id))
status = SUCCESS if results else FAILURE
print(' '.join((status, WHITE(self.product_description))))
for left, right in results:
print(YELLOW(left) + ': ' + BLUE(right))
IFTTT(self.product_description, ' '.join((left, self.__class__.__name__)), right)
```
|
{
"source": "jeffsf/flent-sqm",
"score": 2
}
|
#### File: flent-sqm/flentsqm/controller.py
```python
import math
import sys
from flentsqm.runs import RunCollector, FlentRun, execute_one_test
from flentsqm.router import Router
from flentsqm.naming import protect_for_filename
###
### TODO: Handle failed flent run (more than logging error?)
###
### TODO: Up pass not marking selected on first run?
# Data file written to EA8300_no_irqbalance_None_2019-09-13_0643/tcp_8down-2019-09-13T065656.371916.EA8300_no_irqbalance_None_SQM_1933_1933.flent.gz.
#
# Summary of tcp_8down test run from 2019-09-13 13:56:56.371916
# Title: 'EA8300_no_irqbalance_None_SQM_1933_1933'
#
# avg median # data pts
# Ping (ms) ICMP : 3.07 3.04 ms 349
# TCP download avg : 27.13 27.19 Mbits/s 301
# TCP download sum : 217.00 217.50 Mbits/s 301
# TCP download::1 : 27.11 27.16 Mbits/s 299
# TCP download::2 : 27.17 27.16 Mbits/s 299
# TCP download::3 : 27.12 27.17 Mbits/s 299
# TCP download::4 : 27.11 27.16 Mbits/s 299
# TCP download::5 : 27.12 27.16 Mbits/s 299
# TCP download::6 : 27.11 27.17 Mbits/s 299
# TCP download::7 : 27.11 27.16 Mbits/s 299
# TCP download::8 : 27.11 27.16 Mbits/s 299
#
# EA8300 no irqbalance None tcp_8down
# Total Ping CoV down up Target sigma
# 216.49 Mbps 3.09 ms 0.06 % 0.06 % 1841 Mbps 0.0151
# => 216.64 Mbps 3.09 ms 0.00 % 0.00 % 1841 Mbps 0.0000
# 216.14 Mbps 3.16 ms 0.01 % 0.01 % 2117 Mbps 0.0035
# 215.87 Mbps 3.13 ms 0.07 % 0.07 % 2117 Mbps 0.0200
# 217.75 Mbps 3.02 ms 0.02 % 0.02 % 1933 Mbps 0.0053
# 217.00 Mbps 3.04 ms 0.08 % 0.08 % 1933 Mbps 0.0207
#
# SQM: Stopping SQM on eth1
###
class PassController:
def _default_test_run_successful(self, run: FlentRun):
var_ok = (run.coefvar_both < 0.01) or (run.stddev_both < 0.02)
###
### TODO: Handle misbehaving SQM better?
###
sqm_fudge_factor = 0.7
if self.current_target:
if run.download and run.download < self.current_target * sqm_fudge_factor:
return False
if run.upload and run.upload < self.current_target * sqm_fudge_factor:
return False
if self.ping_limit:
return var_ok and run.ping < self.ping_limit
else:
return var_ok
def _default_test_run_no_progress(self, run: FlentRun):
return False
def _default_create_sqm_string(self, sqm):
if sqm and sqm >= self.sqm_fractional_threshold:
sqmf = f"{sqm}"
elif sqm:
sqmf = f"{sqm:0.1f}"
else:
sqmf = "None"
return sqmf
def _default_create_title(self):
sqmf = self.create_sqm_string(self.current_target)
return protect_for_filename(f"{self.device}_{self.tunnel}_SQM_{sqmf}_{sqmf}")
def _default_create_note(self):
sqmf = self.create_sqm_string(self.current_target)
return f"{self.device} {self.tunnel} {self.test} {sqmf}/{sqmf}"
def _default_prepare_sqm(self):
self.router.sqm_set_params(interface=self.iface, overhead=self.overhead)
self.router.sqm_enable(yes=(self.current_target is not None), print_output=True)
def _default_after_run(self, run):
print(run.flent_output_string)
def _default_dump(self):
print(self.collected.dump(with_output_filename=False))
if self.logname:
with open(self.logname, mode='w') as logfile:
print(self.collected.dump(), file=logfile)
def _default_before_continue(self):
self._default_dump()
def __init__(self, run_collector: RunCollector, router: Router, device, test, tunnel, destdir, logname):
self.collected = run_collector
self.router = router
self.device = device
self.test = test
self.tunnel = tunnel
self.destdir = destdir
self.logname = logname
self.host = None
self.iface = None
self.overhead = None
self.ping_limit = None
self.run_successful_test = self._default_test_run_successful
self.no_progress_test = self._default_test_run_no_progress
self.create_sqm_string = self._default_create_sqm_string
self.create_title = self._default_create_title
self.create_note = self._default_create_note
self.prepare_sqm = self._default_prepare_sqm
self.after_run = self._default_after_run
self.before_continue = self._default_before_continue
self.pass_started = False
self.pass_complete = False
self.sqm_fractional_threshold = 20
self.sqm_fractional_increment = 0.1
self.highest_successful_run = None
self.current_target = None
if self.tunnel is None or self.tunnel.lower() == "none":
self.tunnel = None
self.host = '10.0.0.2'
self.iface = router.run_cmd("uci get network.wan.ifname").stdout.decode('utf-8').strip()
if not self.iface:
print("No interface for wan returned. Exiting")
exit(1)
self.overhead = 22
elif self.tunnel.lower() == "wireguard":
self.tunnel = "WireGuard"
self.host = "172.16.0.2"
self.iface = "wg0"
self.overhead = 82
elif self.tunnel.lower() == "openvpn":
self.tunnel = "OpenVPN"
self.host = "172.16.1.2"
self.iface = "tun0"
self.overhead = 95
else:
print(f"Unrecognized tunnel: '{tunnel}'. Exiting")
exit(2)
def start(self):
raise NotImplementedError
class DownPassController(PassController):
def __init__(self, run_collector: RunCollector, router: Router, device, test, tunnel, destdir, logname,
factor=0.7, start_at=1024):
super().__init__(run_collector, router, device, test, tunnel, destdir, logname)
self.factor = factor
self.start_at = start_at
self.target_success_requires = 2
self.target_failure_requires = 1
self.sqm_lower_limit = 1
def start(self):
self.current_target = self.start_at
self.prepare_sqm()
done = False
found_success = False
count_failures = 0
count_successes = 0
while not done:
this_run = execute_one_test(router=self.router,
sqm=self.current_target,
dest_dir = self.destdir,
host = self.host,
test = self.test,
title = self.create_title(),
note = self.create_note(),
)
self.after_run(this_run)
run_successful = self.run_successful_test(this_run)
no_progress = self.no_progress_test(this_run)
cr = self.collected.add(this_run)
if run_successful:
count_successes += 1
cr.marked_good = True
else:
count_failures += 1
cr.marked_bad = True
# Typical condition where additional runs are required
if count_successes < self.target_success_requires and count_failures < self.target_failure_requires:
self.before_continue()
continue
# Success at this SQM target
if count_successes >= self.target_success_requires:
if not found_success:
found_success = True
cr.marked_selected = True
self.highest_successful_run = self.collected.runs[-1]
# Failure at this SQM target
if count_failures >= self.target_failure_requires:
pass # Well, not really, more below
if found_success:
done = True
self.before_continue()
continue
# Set up next target
if self.current_target > 20:
next_target = self.current_target * self.factor
else: # "Slow" devices failed to find a suitable target for RRUL
next_target = self.current_target * math.sqrt(self.factor)
if next_target < self.sqm_fractional_threshold:
increment = self.sqm_fractional_increment
else:
increment = 1
next_target = round(next_target/increment) * increment
if next_target == self.current_target:
next_target = self.current_target - increment
if next_target < self.sqm_lower_limit:
done = True
self.before_continue()
continue
self.current_target = next_target
count_failures = 0
count_successes = 0
self.before_continue()
continue
return self.highest_successful_run
###
### TODO: What about the non-improvement case?
### tcp_8down, None, EA8300
class UpPassController(PassController):
def __init__(self, run_collector: RunCollector, router: Router, device, test, tunnel, destdir, logname,
factor, start_at):
super().__init__(run_collector, router, device, test, tunnel, destdir, logname)
self.factor = factor
self.start_at = start_at
self.target_success_requires = 2
self.target_failure_requires = 2
self.sqm_upper_limit = 2000
def start(self):
self.current_target = self.start_at
self.prepare_sqm()
done = False
found_success = False
count_failures = 0
count_successes = 0
while not done:
this_run = execute_one_test(router=self.router,
sqm=self.current_target,
dest_dir = self.destdir,
host = self.host,
test = self.test,
title = self.create_title(),
note = self.create_note(),
)
self.after_run(this_run)
run_successful = self.run_successful_test(this_run)
no_progress = self.no_progress_test(this_run)
cr = self.collected.add(this_run)
if run_successful:
count_successes += 1
cr.marked_good = True
else:
count_failures += 1
cr.marked_bad = True
# Typical condition where additional runs are required
if count_successes < self.target_success_requires and count_failures < self.target_failure_requires:
self.before_continue()
continue
# Success at this SQM target
if count_successes >= self.target_success_requires:
self.highest_successful_run = self.collected.runs[-1]
# Failure at this SQM target
if count_failures >= self.target_failure_requires:
if self.highest_successful_run:
self.highest_successful_run.marked_selected = True
done = True
self.before_continue()
continue
else:
# Need to back off and try again
self.current_target = self.current_target / (self.factor**5) # **4 would be 3 steps back
# Set up next target
next_target = self.current_target * self.factor
if next_target < self.sqm_fractional_threshold:
increment = self.sqm_fractional_increment
else:
increment = 1
next_target = round(next_target/increment) * increment
if next_target == self.current_target:
next_target = self.current_target + increment
if next_target > self.sqm_upper_limit:
done = True
self.before_continue()
continue
self.current_target = next_target
count_failures = 0
count_successes = 0
self.before_continue()
continue
return self.highest_successful_run
class MultiRunController(PassController):
def __init__(self, run_collector: RunCollector, router: Router, device, test, tunnel, destdir, logname,
runs=5, sqm=None):
super().__init__(run_collector, router, device, test, tunnel, destdir, logname)
self.runs = runs
self.sqm = sqm
if (int(runs) != runs) or (runs % 2 != 1):
new_runs = int(runs)
if new_runs % 2 != 1:
new_runs += 1
print(f"runs={runs} not an odd integer. Changed to {new_runs}.", file=sys.stderr)
self.runs = new_runs
def start(self):
self.current_target = self.sqm
self.prepare_sqm()
for run in range(self.runs):
this_run = execute_one_test(router=self.router,
sqm=self.current_target,
dest_dir = self.destdir,
host = self.host,
test = self.test,
title = self.create_title(),
note = self.create_note(),
)
self.after_run(this_run)
cr = self.collected.add(this_run)
median_index = int(self.runs / 2)
by_totals = lambda run: run.totals
sorted_runs = sorted(self.collected.runs, key=by_totals)
sorted_runs[median_index].marked_selected = True
print(self.collected.dump(sort=by_totals, with_output_filename=False))
if self.logname:
with open(self.logname, mode='w') as logfile:
print(self.collected.dump(sort=by_totals), file=logfile)
return sorted_runs[median_index]
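# Illustrative usage sketch (not part of the original module): the device, tunnel and paths are
# hypothetical; it only shows how a DownPassController is wired to a RunCollector and Router,
# mirroring test_sqm_range.py.
#
#   collector = RunCollector(device="EA8300", tunnel="WireGuard", test="tcp_8down")
#   controller = DownPassController(run_collector=collector, router=Router(),
#                                   device="EA8300", test="tcp_8down", tunnel="WireGuard",
#                                   destdir="out", logname="out/tcp_8down.log",
#                                   factor=0.7, start_at=1024)
#   best_run = controller.start()  # highest SQM target that met the success criteria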
```
#### File: flent-sqm/flentsqm/router.py
```python
import subprocess
class Router:
def __init__(self, ip="192.168.1.1", user="root"):
self._ip = ip
self._user = user
def run_cmd(self, cmd, print_output=False):
if isinstance(cmd, str):
cmd = cmd.split()
sp = subprocess.run(["ssh", f"{self._user}@{self._ip}"] + cmd, capture_output=True)
if sp.stderr:
print(sp.stderr.decode('utf-8'))
if print_output:
print(sp.stdout.decode('utf-8'))
return sp
def sqm_show(self, print_output=True):
self.run_cmd("uci show sqm", print_output)
def sqm_restart(self, print_output=False):
self.run_cmd("/etc/init.d/sqm restart", print_output)
def sqm_enable(self, yes=True, print_output=False):
if yes:
state = 1
else:
state = 0
self.run_cmd(f"uci set sqm.test.enabled='{state}'")
self.run_cmd("uci commit")
self.sqm_restart(print_output)
def sqm_set_params(self, interface, overhead):
self.run_cmd(f"uci set sqm.test.interface='{interface}'")
self.run_cmd(f"uci set sqm.test.overhead='{overhead}'")
self.run_cmd("uci commit")
def sqm_set_targets(self, download, upload):
self.run_cmd(f"uci set sqm.test.download='{download}'")
self.run_cmd(f"uci set sqm.test.upload='{upload}'")
self.run_cmd("uci commit")
```
#### File: jeffsf/flent-sqm/test_sqm_range.py
```python
import os
import sys
import time
from flentsqm.controller import DownPassController
from flentsqm.naming import protect_for_filename
from flentsqm.runs import RunCollector
from flentsqm.router import Router
try:
device = sys.argv[1]
except IndexError:
print(f"Usage: {sys.argv[0]} 'Device Name'")
exit(1)
tunnels = ["WireGuard"]
tests = ["tcp_8down"]
router = Router()
nowstr = time.strftime("%Y-%m-%d_%H%M")
basedir = protect_for_filename(f"{device}_{nowstr}")
try:
os.mkdir(basedir)
except FileExistsError as e:
print(f"Output directory '{basedir}' already exists. Wait a minute and try again.")
exit(1)
class DownScan(DownPassController):
def always_fail(self, run):
return self.current_target < 10
def __init__(self, run_collector: RunCollector, router: Router, device, test, tunnel, destdir, logname,
factor=0.7, start_at=380):
super().__init__(run_collector, router, device, test, tunnel, destdir, logname, factor, start_at)
self.run_successful_test = self.always_fail
self.target_failure_requires = 1
self.target_success_requires = 1
for tunnel in tunnels:
destdir = os.path.join(basedir,
protect_for_filename(tunnel or "None"))
try:
os.mkdir(destdir)
except FileExistsError as e:
pass
for test in tests:
rc_sqm = RunCollector(device=device, tunnel=tunnel, test=test)
down_pass = DownScan(run_collector=rc_sqm,
router=router, device=device, test=test, tunnel=tunnel,
destdir=destdir, logname=f"{destdir}/{test}_sqm.log")
down_pass.start()
```
|
{
"source": "jeffsgrover/twitter-bot-python-heroku",
"score": 3
}
|
#### File: jeffsgrover/twitter-bot-python-heroku/twitter_bot_original.py
```python
import json
import random
import time
import sys
import tweepy
import credentials
import urllib.request
from os import environ
import gc
CONSUMER_KEY = credentials.CONSUMER_KEY
CONSUMER_SECRET = credentials.CONSUMER_SECRET
ACCESS_KEY = credentials.ACCESS_KEY
ACCESS_SECRET = credentials.ACCESS_KEY_SECRET
# get_weather() below references FORECAST_APIKEY, which was never defined in this module;
# it is assumed here to live in credentials.py as the OpenWeatherMap API key.
FORECAST_APIKEY = credentials.FORECAST_APIKEY
# CONSUMER_KEY = environ['CONSUMER_KEY']
# CONSUMER_SECRET = environ['CONSUMER_SECRET']
# ACCESS_KEY = environ['ACCESS_KEY']
# ACCESS_SECRET = environ['ACCESS_SECRET']
def get_quotes():
with open('quotes.json') as f:
quotes_json = json.load(f)
return quotes_json['quotes']
def get_random_quote():
quotes = get_quotes()
random_quote = random.choice(quotes)
return random_quote
def create_quote():
quote = get_random_quote()
quote = """
{}
~{}
""".format(quote['quote'], quote['author'])
return quote
def get_weather():
req = urllib.request.Request(url=f'https://api.openweathermap.org/data/2.5/weather?q=Atlanta&units=imperial&appid='+FORECAST_APIKEY)
with urllib.request.urlopen(req) as resp:
data = json.loads(resp.read().decode("utf-8"))
gc.collect()
return data
def create_tweet():
data=get_weather()
temperature = str(int(round(data['main']['temp'])))
degree_sign = u'\N{DEGREE SIGN}'
description = data['weather'][0]['description']
#description = data['current']['weather'][0]['description']
tweet = "Rise Up ATL Runners! It's currently " + temperature + degree_sign + "F and " + str(description) +". Time for a run!" + create_quote()+"\n #morningmotivation #running #atlanta #atlantatrackclub"
if len(tweet) > 280:
tweet = "Rise Up ATL Runners! It's currently " + temperature + degree_sign + "F and " + str(description)+". Time for a run! \n #morningmotivation #running #atlanta #atlantatrackclub"
return tweet
def tweet_quote():
#interval = 60 * 60 * 12 # tweet every 12 hours
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
tweet = create_tweet()
api.update_status(tweet)
#while True:
# print('getting a random quote...')
# tweet = create_tweet()
# api.update_status(tweet)
#time.sleep(interval)
if __name__ == "__main__":
tweet_quote()
```
#### File: jeffsgrover/twitter-bot-python-heroku/twitter_bot.py
```python
import json, random, tweepy, credentials, sys
from os import environ
CONSUMER_KEY = environ['CONSUMER_KEY']
CONSUMER_SECRET = environ['CONSUMER_SECRET']
ACCESS_KEY = environ['ACCESS_KEY']
ACCESS_SECRET = environ['ACCESS_SECRET']
def get_headlines():
with open('headlines.json') as f:
headlines = json.load(f)
return headlines['headlines']
def get_random_headline():
headlines = get_headlines()
random_headline = random.choice(headlines)
return random_headline['headline']
def post_headline():
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
headline = get_random_headline()
api.update_status(headline)
if __name__ == "__main__":
post_headline()
```
|
{
"source": "jeffshek/betterself",
"score": 3
}
|
#### File: analytics/events/analytics.py
```python
from django.core.exceptions import ValidationError
from numpy import dtype
VALID_CORRELATION_METHODS = ['pearson', 'spearman', 'kendall']
ROLLABLE_COLUMN_TYPES = {dtype('float64'), dtype('int64')}
class DataFrameEventsAnalyzer(object):
"""
Takes a DataFrame and returns analytics on top of it.
"""
def __init__(self, dataframe, ignore_columns=None, rest_day_column=None):
# certain columns might be just notes or useless information that can be ignored
dataframe_cols = dataframe.columns
# maybe ignore is a bad name ... non-weighted columns? something that implies
# that it shouldn't be used in correlation analysis
if ignore_columns:
assert isinstance(ignore_columns, list)
self.ignore_columns = ignore_columns
self.valid_columns = [item for item in dataframe_cols if item not in ignore_columns]
else:
self.ignore_columns = []
self.valid_columns = dataframe_cols
# if it's a rest day, the correlations shouldn't be used, i.e. if you drank caffeine on a Sunday
# and weren't intending to get any work done, that day shouldn't be used to measure how effective
# caffeine is.
if rest_day_column:
assert isinstance(rest_day_column, str)
dataframe = dataframe[dataframe[rest_day_column] == False] # noqa
self.dataframe = dataframe
@classmethod
def add_yesterday_shifted_to_dataframe(cls, dataframe):
valid_columns = cls.get_rollable_columns(dataframe)
dataframe = dataframe.copy()
for col in valid_columns:
# Advil - Yesterday
shifted_col_name = '{0} - Yesterday'.format(col)
series = dataframe[col]
shifted_series = series.shift(1)
dataframe[shifted_col_name] = shifted_series
return dataframe
def _get_cleaned_dataframe_copy(self, measurement, add_yesterday_lag, method):
self._validate_correlation_method(method)
# copy lets me be certain each result doesn't mess up state
dataframe = self.dataframe.copy()
if add_yesterday_lag:
dataframe = self.add_yesterday_shifted_to_dataframe(dataframe)
if not dataframe.empty:
dataframe = self._remove_invalid_measurement_days(dataframe, measurement)
return dataframe
@staticmethod
def _get_correlation_from_dataframe(dataframe, measurement, method, min_periods):
correlation_results = dataframe.corr(method, min_periods)[measurement]
correlation_results_sorted = correlation_results.sort_values(inplace=False)
return correlation_results_sorted
def get_correlation_for_measurement(self, measurement, add_yesterday_lag=False, method='pearson', min_periods=1):
"""
:param measurement: Measurement is the column name of what you're trying to improve / correlate
:param add_yesterday_lag: factor if you drank coffee yesterday
:param method: see pandas documentation
:param min_periods: see pandas documentation
:return: correlation series
"""
dataframe = self._get_cleaned_dataframe_copy(measurement, add_yesterday_lag=add_yesterday_lag, method=method)
correlation_results_sorted = self._get_correlation_from_dataframe(dataframe, measurement, method, min_periods)
return correlation_results_sorted
def get_correlation_across_summed_days_for_measurement(self, measurement, add_yesterday_lag=False, window=7,
method='pearson', min_periods=1):
dataframe = self._get_cleaned_dataframe_copy(measurement, add_yesterday_lag=add_yesterday_lag, method=method)
rolled_dataframe = self.get_rolled_dataframe(dataframe, window)
# update means any of the valid columns that could be updated ... are
dataframe.update(rolled_dataframe)
# don't care about entire rows that are NaN because they won't have a sum
dataframe = dataframe.dropna(how='all')
# for anything that isn't filled in, assume those are zeros, kind of have to because we're summing
dataframe = dataframe.fillna(0)
correlation_results_sorted = self._get_correlation_from_dataframe(dataframe, measurement, method, min_periods)
return correlation_results_sorted
@staticmethod
def get_rollable_columns(dataframe):
# not all dataframe columns are rollable ... the original source (excel) should already have them
# listed as minutes, so don't try to sum up Time objects
dataframe_col_types = dataframe.dtypes
dataframe_rollable_columns = [col_name for col_name, col_type in dataframe_col_types.items() if col_type in
ROLLABLE_COLUMN_TYPES]
return dataframe_rollable_columns
@classmethod
def get_rolled_dataframe(cls, dataframe, window, min_periods=None):
dataframe_rollable_columns = cls.get_rollable_columns(dataframe)
rollable_dataframe = dataframe[dataframe_rollable_columns]
# haven't figured out the right way to deal with min_periods
# the thinking is that this rolling function should not be as forgiving
# instead, maybe have the serializer replace NaN data with zeroes at that step
# since "unfilled data" frequently just means None
rolled_dataframe = rollable_dataframe.rolling(window=window, center=False).sum()
return rolled_dataframe
@staticmethod
def _remove_invalid_measurement_days(dataframe, measurement):
"""
if productivity is marked as zero, that shouldn't be used to measure effectiveness
since it's almost impossible to get a zero using RescueTime as a productive driver
this might not be the best measurement ... perhaps serializer should just fill with np.NaN
so when you get the data here you can make a more accurate decision to scrap
"""
valid_days = dataframe[measurement] != 0
dataframe = dataframe[valid_days]
return dataframe
@staticmethod
def _validate_correlation_method(method):
if not isinstance(method, str):
raise ValidationError('Correlation must be a string')
if method not in VALID_CORRELATION_METHODS:
raise ValidationError('Correlation must be one of {0} methods'.format(VALID_CORRELATION_METHODS))
@classmethod
def get_dataframe_event_count(cls, dataframe):
rollable_columns = cls.get_rollable_columns(dataframe)
event_count = {}
for col in rollable_columns:
series = dataframe[col]
series_count = series[series != 0].count()
event_count[col] = series_count
return event_count
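# Illustrative usage sketch (not part of the original module): the toy dataframe is hypothetical
# and only shows how a correlation against a target column is requested from the analyzer.
if __name__ == "__main__":
    import pandas as pd
    toy = pd.DataFrame({
        'Caffeine': [1, 0, 2, 1, 0, 2, 1],
        'Productivity': [5, 2, 7, 5, 1, 8, 4],
        'Rest Day': [False] * 7,
    })
    analyzer = DataFrameEventsAnalyzer(toy, rest_day_column='Rest Day')
    print(analyzer.get_correlation_for_measurement('Productivity'))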
```
#### File: v1/analytics/tests.py
```python
import math
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from apis.betterself.v1.constants import UNIQUE_KEY_CONSTANT
from apis.betterself.v1.signup.fixtures.builders import DemoHistoricalDataBuilder
from events.models import SupplementLog, SleepLog, DailyProductivityLog
from supplements.models import Supplement
User = get_user_model()
class BaseSupplementAnalyticsTests(TestCase):
@classmethod
def setUpAnalyticsData(cls):
cls.default_user, _ = User.objects.get_or_create(username='default')
builder = DemoHistoricalDataBuilder(cls.default_user)
builder.create_historical_fixtures()
supplement = Supplement.objects.filter(user=cls.default_user).first()
cls.supplement = supplement
def setUp(self):
self.client = APIClient()
self.client.force_login(self.default_user)
class BaseSupplementsAnalyticsTestCasesMixin(object):
def test_view_with_no_sleep_data(self):
SleepLog.objects.filter(user=self.default_user).delete()
response = self.client.get(self.url)
# make sure that no nans come across the data, should always be none
values_returned = [item['value'] for item in response.data]
for value in values_returned:
if isinstance(value, float):
self.assertFalse(math.isnan(value))
self.assertEqual(response.status_code, 200)
def test_view_with_no_productivity_data(self):
DailyProductivityLog.objects.filter(user=self.default_user).delete()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_view_with_no_supplement_data(self):
SupplementLog.objects.filter(user=self.default_user).delete()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
class SupplementAnalyticsSummaryTests(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-summary', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
expected_keys = {'productivity_correlation',
'sleep_correlation',
'most_taken',
'most_taken_dates',
'creation_date'}
response_keys = set([item[UNIQUE_KEY_CONSTANT] for item in response.data])
self.assertEqual(expected_keys, response_keys)
first_event = SupplementLog.objects.filter(supplement=self.supplement).order_by('time').first()
for data in response.data:
if data[UNIQUE_KEY_CONSTANT] == 'creation_date':
self.assertEqual(first_event.time.isoformat(), data['value'])
class SupplementAnalyticsSleepTest(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-sleep', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
class SupplementAnalyticsProductivityTest(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-productivity', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
class SupplementDosagesAnalyticsTest(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-dosages', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
```
#### File: v1/analytics/views.py
```python
import datetime
import pandas as pd
import numpy as np
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from analytics.events.utils.dataframe_builders import SupplementEventsDataframeBuilder, SleepActivityDataframeBuilder, \
ProductivityLogEventsDataframeBuilder
from betterself.utils.api_utils import get_api_value_formatted
from constants import VERY_PRODUCTIVE_TIME_LABEL
from betterself.utils.date_utils import get_current_date_years_ago
from events.models import SupplementLog, SleepLog, DailyProductivityLog
from supplements.models import Supplement
class SupplementAnalyticsMixin(object):
@classmethod
def _get_analytics_dataframe(cls, user, supplement_uuid):
supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=user)
supplement_series = cls._get_daily_supplement_events_series_last_year(user, supplement)
sleep_series = cls._get_sleep_series_last_year(user)
productivity_series = cls._get_productivity_series_last_year(user)
# if either sleep or productivity is empty, create an empty series that is timezone
# aware (hence, matching the supplement index)
if sleep_series.empty:
sleep_series = pd.Series(index=supplement_series.index)
if productivity_series.empty:
productivity_series = pd.Series(index=supplement_series.index)
dataframe_details = {
'supplement': supplement_series,
'sleep': sleep_series,
'productivity': productivity_series
}
dataframe = pd.DataFrame(dataframe_details)
return dataframe
@staticmethod
def _get_daily_supplement_events_series_last_year(user, supplement):
# TODO - This may serve better as a supplement fetcher mixin
"""
:param user:
:param supplement:
:return: time series of how many servings of that particular supplement were taken each day
"""
start_date = get_current_date_years_ago(1)
supplement_events = SupplementLog.objects.filter(user=user, supplement=supplement, time__date__gte=start_date)
builder = SupplementEventsDataframeBuilder(supplement_events)
try:
series = builder.get_flat_daily_dataframe()[supplement.name]
except KeyError:
# KeyError means it doesn't exist, so create an index that can be used for everything else
date_range_index = pd.date_range(start=start_date, end=datetime.date.today(), tz=user.pytz_timezone)
series = pd.Series(index=date_range_index)
return series
@staticmethod
def _get_sleep_series_last_year(user):
"""
:param user:
:return: series of how much sleep that person has gotten, in minutes
"""
start_date = get_current_date_years_ago(1)
sleep_events = SleepLog.objects.filter(user=user, start_time__date__gte=start_date)
builder = SleepActivityDataframeBuilder(sleep_events)
series = builder.get_sleep_history_series()
# anytime sleep is actually set at zero, the value should be NaN
series[series == 0] = np.NaN
return series
@staticmethod
def _get_productivity_series_last_year(user):
start_date = get_current_date_years_ago(1)
logs = DailyProductivityLog.objects.filter(user=user, date__gte=start_date)
builder = ProductivityLogEventsDataframeBuilder(logs)
try:
series = builder.get_flat_daily_dataframe()[VERY_PRODUCTIVE_TIME_LABEL]
except KeyError:
return pd.Series()
return series
class SupplementAnalyticsSummary(APIView, SupplementAnalyticsMixin):
def get(self, request, supplement_uuid):
dataframe = self._get_analytics_dataframe(request.user, supplement_uuid)
supplement_series = dataframe['supplement']
# a week is generally the best window to use for correlation; otherwise odd days
# like Sundays (when everyone is lazy) and Mondays (when everyone is trying to do
# as much as possible) interfere with the correlations
dataframe_rolling_week = dataframe.rolling(window=7, min_periods=1).sum()
supplement_correlation_series = dataframe_rolling_week.corr()['supplement']
# TODO - What should happen if any of these results are null / none?
productivity_correlation_value = supplement_correlation_series['productivity']
sleep_correlation_value = supplement_correlation_series['sleep']
most_taken_value = supplement_series.max()
# there may be multiple dates on which the maximum number of servings was taken
most_taken_dates = supplement_series[supplement_series == most_taken_value].index
most_taken_dates = [item.isoformat() for item in most_taken_dates]
# order by time because we care about the time the event represents, not when the record was created
supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=request.user)
try:
creation_date = SupplementLog.objects.filter(supplement=supplement).order_by('time').first().time. \
isoformat()
except AttributeError:
# no creation_date found
creation_date = None
results = [
get_api_value_formatted(
'productivity_correlation', productivity_correlation_value, 'Productivity Correlation'
),
get_api_value_formatted(
'sleep_correlation', sleep_correlation_value, 'Sleep Correlation'
),
get_api_value_formatted(
'most_taken', most_taken_value, 'Most Servings Taken (1 Day)'
),
get_api_value_formatted(
'most_taken_dates', most_taken_dates, 'Most Taken Dates', data_type='list-datetime'
),
get_api_value_formatted(
'creation_date', creation_date, 'Date of First Use', data_type='string-datetime'
),
]
return Response(results)
class SupplementSleepAnalytics(APIView, SupplementAnalyticsMixin):
def get(self, request, supplement_uuid):
dataframe = self._get_analytics_dataframe(request.user, supplement_uuid)
index_of_supplement_taken_at_least_once = dataframe['supplement'].dropna().index
dataframe_of_supplement_taken_at_least_once = dataframe.ix[index_of_supplement_taken_at_least_once]
supplement_series = dataframe_of_supplement_taken_at_least_once['supplement']
most_taken_value = supplement_series.max()
most_taken_dates = supplement_series[supplement_series == most_taken_value].index
most_taken_dataframe = dataframe_of_supplement_taken_at_least_once.ix[most_taken_dates]
results = []
most_taken_sleep_mean = most_taken_dataframe['sleep'].max()
most_taken_sleep_mean = get_api_value_formatted(
'most_taken_sleep_mean', most_taken_sleep_mean, 'Mean Time Slept ({} Servings)'.format(
most_taken_value))
results.append(most_taken_sleep_mean)
most_taken_sleep_median = most_taken_dataframe['sleep'].median()
most_taken_sleep_median = get_api_value_formatted(
'most_taken_sleep_median', most_taken_sleep_median, 'Median Time Slept ({} Servings)'.format(
most_taken_value))
results.append(most_taken_sleep_median)
dates_where_no_supplement_taken = dataframe['supplement'].isnull()
dataframe_of_no_supplement_taken = dataframe.ix[dates_where_no_supplement_taken]
median_sleep_taken_once = dataframe_of_supplement_taken_at_least_once['sleep'].median()
median_sleep_taken_once = get_api_value_formatted(
'median_sleep_taken_once', median_sleep_taken_once,
'Median Time Slept (Min 1 Serving)')
results.append(median_sleep_taken_once)
mean_sleep_taken_once = dataframe_of_supplement_taken_at_least_once['sleep'].mean()
mean_sleep_taken_once = get_api_value_formatted(
'mean_sleep_taken_once', mean_sleep_taken_once,
'Mean Time Slept (Min 1 Serving)')
results.append(mean_sleep_taken_once)
mean_sleep_no_supplement = dataframe_of_no_supplement_taken['sleep'].mean()
mean_sleep_no_supplement = get_api_value_formatted(
'mean_sleep_no_supplement', mean_sleep_no_supplement,
'Mean Time Slept (0 Servings)')
results.append(mean_sleep_no_supplement)
median_sleep_of_no_supplement = dataframe_of_no_supplement_taken['sleep'].median()
median_sleep_of_no_supplement = get_api_value_formatted(
'median_sleep_of_no_supplement', median_sleep_of_no_supplement,
'Median Time Slept (0 Servings)')
results.append(median_sleep_of_no_supplement)
return Response(results)
class SupplementProductivityAnalytics(APIView, SupplementAnalyticsMixin):
def get(self, request, supplement_uuid):
dataframe = self._get_analytics_dataframe(request.user, supplement_uuid)
index_of_supplement_taken_at_least_once = dataframe['supplement'].dropna().index
dataframe_of_supplement_taken_at_least_once = dataframe.ix[index_of_supplement_taken_at_least_once]
dates_where_no_supplement_taken = dataframe['supplement'].isnull()
dataframe_of_no_supplement_taken = dataframe.ix[dates_where_no_supplement_taken]
results = []
productivity_series_with_supplement = dataframe_of_supplement_taken_at_least_once['productivity']
productivity_series_without_supplement = dataframe_of_no_supplement_taken['productivity']
# no point continuing if there is no productivity data for the days the supplement was taken
if productivity_series_with_supplement.dropna().empty:
return Response(results)
most_productive_time_with_supplement_raw = productivity_series_with_supplement.max()
most_productive_time_with_supplement = get_api_value_formatted(
'most_productive_time_with_supplement', most_productive_time_with_supplement_raw,
'Most Productive Time (Min 1 Serving)')
results.append(most_productive_time_with_supplement)
most_productive_date_with_supplement = productivity_series_with_supplement.idxmax()
most_productive_date_with_supplement = get_api_value_formatted(
'most_productive_date_with_supplement', most_productive_date_with_supplement,
'Most Productive Date', 'string-datetime')
results.append(most_productive_date_with_supplement)
least_productive_time_with_supplement = productivity_series_with_supplement.min()
least_productive_time_with_supplement = get_api_value_formatted(
'least_productive_time_with_supplement', least_productive_time_with_supplement,
'Least Productive Time (Min 1 Serving)')
results.append(least_productive_time_with_supplement)
least_productive_date_with_supplement = productivity_series_with_supplement.idxmin()
least_productive_date_with_supplement = get_api_value_formatted(
'least_productive_date_with_supplement', least_productive_date_with_supplement,
'Least Productive Date', 'string-datetime')
results.append(least_productive_date_with_supplement)
median_productive_time_with_supplement = productivity_series_with_supplement.median()
median_productive_time_with_supplement = get_api_value_formatted(
'median_productive_time_with_supplement', median_productive_time_with_supplement,
'Median Productive Time (Min 1 Serving)')
results.append(median_productive_time_with_supplement)
mean_productive_time_with_supplement = productivity_series_with_supplement.mean()
mean_productive_time_with_supplement = get_api_value_formatted(
'mean_productive_time_with_supplement', mean_productive_time_with_supplement,
'Mean Productive Time (Min 1 Serving)')
results.append(mean_productive_time_with_supplement)
median_productive_time_without_supplement = productivity_series_without_supplement.median()
median_productive_time_without_supplement = get_api_value_formatted(
'median_productive_time_without_supplement', median_productive_time_without_supplement,
'Median Productive Time (0 Servings)')
results.append(median_productive_time_without_supplement)
mean_productive_time_without_supplement = productivity_series_without_supplement.mean()
mean_productive_time_without_supplement = get_api_value_formatted(
'mean_productive_time_without_supplement', mean_productive_time_without_supplement,
'Mean Productive Time (0 Servings)')
results.append(mean_productive_time_without_supplement)
return Response(results)
class SupplementDosageAnalytics(APIView, SupplementAnalyticsMixin):
def get(self, request, supplement_uuid):
dataframe = self._get_analytics_dataframe(request.user, supplement_uuid)
index_of_supplement_taken_at_least_once = dataframe['supplement'].dropna().index
dataframe_of_supplement_taken_at_least_once = dataframe.ix[index_of_supplement_taken_at_least_once]
results = []
mean_serving_size_last_365_days = dataframe['supplement'].fillna(0).mean()
mean_serving_size_last_365_days = get_api_value_formatted(
'mean_serving_size_last_365_days', mean_serving_size_last_365_days,
'Mean Serving Size (All Days)')
results.append(mean_serving_size_last_365_days)
median_serving_size = dataframe_of_supplement_taken_at_least_once['supplement'].median()
median_serving_size = get_api_value_formatted(
'median_serving_size', median_serving_size,
'Median Serving Size (Min 1 Serving)')
results.append(median_serving_size)
mean_serving_size = dataframe_of_supplement_taken_at_least_once['supplement'].mean()
mean_serving_size = get_api_value_formatted(
'mean_serving_size', mean_serving_size,
'Mean Serving Size (Min 1 Serving)')
results.append(mean_serving_size)
return Response(results)
```
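The summary view above correlates a 7-day rolling sum of each series rather than the raw daily values, so single noisy days do not dominate the result. A minimal, self-contained sketch of that calculation with made-up data (the column names mirror the view; the values are synthetic):
```python
import numpy as np
import pandas as pd

np.random.seed(0)
index = pd.date_range('2017-01-01', periods=30, freq='D')

# synthetic daily values standing in for the real supplement/sleep/productivity series
dataframe = pd.DataFrame({
    'supplement': np.random.randint(0, 3, size=30),
    'sleep': np.random.normal(480, 45, size=30),
    'productivity': np.random.normal(300, 60, size=30),
}, index=index)

# smooth with a 7-day rolling sum, then correlate everything against supplement servings
dataframe_rolling_week = dataframe.rolling(window=7, min_periods=1).sum()
supplement_correlation_series = dataframe_rolling_week.corr()['supplement']

print(supplement_correlation_series['sleep'], supplement_correlation_series['productivity'])
```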
#### File: v1/correlations/tests.py
```python
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from apis.betterself.v1.signup.fixtures.builders import DemoHistoricalDataBuilder
from constants import SLEEP_MINUTES_COLUMN
from events.models import SupplementLog, DailyProductivityLog
from supplements.models import Supplement
User = get_user_model()
# python manage.py test apis.betterself.v1.correlations.tests
class BaseCorrelationsMixin(object):
def test_view_with_user_and_no_data(self):
user = User.objects.create(username='something-new')
client = APIClient()
client.force_authenticate(user)
response = client.get(self.url)
# use a standard response for no data so the front end and back end
# can share the same handling logic
self.assertEqual([], response.data)
self.assertEqual(response.status_code, 200)
class BaseCorrelationsTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.url = reverse(cls.url_namespace)
super().setUpClass()
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='demo')
builder = DemoHistoricalDataBuilder(cls.user)
builder.create_historical_fixtures()
super().setUpTestData()
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(self.user)
super().setUp()
class ProductivitySupplementsCorrelationsTests(BaseCorrelationsTestCase, BaseCorrelationsMixin):
url_namespace = 'productivity-supplements-correlations'
def test_productivity_supplements_correlation_view_no_correlations(self):
DailyProductivityLog.objects.filter(user=self.user).delete()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_productivity_supplements_correlation_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
# the correlation of the productivity driver (which will be the first result of the dataset) will be 1
self.assertEqual(response.data[0][1], 1)
def test_productivity_supplements_response_includes_correct_supplements(self):
response = self.client.get(self.url)
supplements_in_response = [item[0] for item in response.data]
# don't include the productivity driver since that's not a supplement
supplements_in_response = supplements_in_response[1:]
user_supplements_ids = SupplementLog.objects.filter(user=self.user).values_list('supplement_id', flat=True)
user_supplements_ids = set(user_supplements_ids)
# the user filter is just for safekeeping; it shouldn't really be necessary
user_supplements = Supplement.objects.filter(
id__in=user_supplements_ids, user=self.user).values_list('name', flat=True)
self.assertCountEqual(supplements_in_response, user_supplements)
def test_productivity_supplements_correlation_view_with_correlation_lookback_parameters(self):
# test by seeing the original output without params
no_params_response = self.client.get(self.url)
no_params_data = no_params_response.data
# now modify the parameter to only back 7 days
params = {
'correlation_lookback': 7,
}
params_response = self.client.get(self.url, params)
params_data = params_response.data
default_params = {
'correlation_lookback': 60,
}
default_params_response = self.client.get(self.url, default_params)
default_params_data = default_params_response.data
self.assertNotEqual(no_params_data, params_data)
# if we pass parameters that are just the defaults, should be the same
self.assertEqual(no_params_data, default_params_data)
def test_productivity_supplements_correlation_view_with_cumulative_lookback_parameters(self):
no_params_response = self.client.get(self.url)
no_params_data = no_params_response.data
# now modify the parameter to only back 7 days
params = {
'cumulative_lookback': 7,
}
params_response = self.client.get(self.url, params)
params_data = params_response.data
default_params = {
'cumulative_lookback': 1,
}
default_params_response = self.client.get(self.url, default_params)
default_params_data = default_params_response.data
self.assertNotEqual(no_params_data, params_data)
# if we pass parameters that are just the defaults, should be the same
self.assertEqual(no_params_data, default_params_data)
def test_productivity_supplements_correlation_view_with_invalid_correlation_parameters(self):
params = {
'correlation_lookback': 'cheeseburger',
}
params_response = self.client.get(self.url, params)
self.assertEqual(params_response.status_code, 400)
def test_productivity_supplements_correlation_view_with_invalid_cumulative_parameters(self):
params = {
'cumulative_lookback': 'cheeseburger',
}
params_response = self.client.get(self.url, params)
self.assertEqual(params_response.status_code, 400)
class SleepSupplementsCorrelationsTests(BaseCorrelationsTestCase, BaseCorrelationsMixin):
url_namespace = 'sleep-supplements-correlations'
def test_sleep_supplements_view(self):
response = self.client.get(self.url)
self.assertTrue(SLEEP_MINUTES_COLUMN in response.data[0])
# Return back in a tuple format to preserve order when transmitting as JSON
self.assertEqual(response.data[0][0], SLEEP_MINUTES_COLUMN)
self.assertEqual(response.data[0][1], 1)
self.assertEqual(response.status_code, 200)
class SleepUserActivitiesCorrelationsTests(BaseCorrelationsTestCase, BaseCorrelationsMixin):
url_namespace = 'sleep-user-activities-correlations'
def test_sleep_activities_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
# the correlation of sleep to itself will be one
self.assertEqual(response.data[0][1], 1)
class ProductivityLogsUserActivitiesCorrelationsTests(BaseCorrelationsTestCase, BaseCorrelationsMixin):
url_namespace = 'productivity-user-activities-correlations'
def test_correlations_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
# the correlation of a variable to itself will be one
self.assertEqual(response.data[0][1], 1)
```
#### File: v1/correlations/views.py
```python
import pandas as pd
from rest_framework.response import Response
from rest_framework.views import APIView
from analytics.events.utils.aggregate_dataframe_builders import AggregateSupplementProductivityDataframeBuilder, \
AggregateUserActivitiesEventsProductivityActivitiesBuilder, AggregateSleepActivitiesUserActivitiesBuilder, \
AggregateSleepActivitiesSupplementsBuilder
from apis.betterself.v1.correlations.serializers import ProductivityRequestParamsSerializer, \
SleepRequestParamsSerializer
from betterself.utils.date_utils import days_ago_from_current_day
from constants import SLEEP_MINUTES_COLUMN, PRODUCTIVITY_DRIVERS_LABELS
NO_DATA_RESPONSE = Response([])
def get_sorted_response(series):
if series.dropna().empty:
return NO_DATA_RESPONSE
# Build a sorted list of tuples, because sorting objects in JavaScript is an oddly difficult problem
# sorted_response = [item for item in series.iteritems()]
sorted_response = []
for index, value in series.iteritems():
if not pd.notnull(value):
value = None
data_point = (index, value)
sorted_response.append(data_point)
return Response(sorted_response)
class CorrelationsAPIView(APIView):
""" Centralizes all the logic for getting dataframe and correlating them to Productivity """
def get(self, request):
# TODO - This function could be way better.
user = request.user
serializer = self.request_serializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
correlation_lookback = serializer.validated_data['correlation_lookback']
cumulative_lookback = serializer.validated_data['cumulative_lookback']
correlation_driver = serializer.validated_data['correlation_driver']
# if we sum over cumulative days, we need to look back even further to have enough data to sum
days_to_look_back = correlation_lookback * cumulative_lookback
cutoff_date = days_ago_from_current_day(days_to_look_back)
aggregate_dataframe = self.dataframe_builder.get_aggregate_dataframe_for_user(user, cutoff_date)
if aggregate_dataframe.empty:
return NO_DATA_RESPONSE
# the Source column isn't meaningful to correlate, so drop it
if 'Source' in aggregate_dataframe:
aggregate_dataframe = aggregate_dataframe.drop('Source', axis=1)
if cumulative_lookback > 1:
# min_periods of 1 allows for periods with no data to still be summed
aggregate_dataframe = aggregate_dataframe.rolling(cumulative_lookback, min_periods=1).sum()
# only include the last correlation_lookback days, otherwise the summed periods overlap the correlation window incorrectly
aggregate_dataframe = aggregate_dataframe[-correlation_lookback:]
df_correlation = aggregate_dataframe.corr()
try:
df_correlation_series = df_correlation[correlation_driver]
except KeyError:
return NO_DATA_RESPONSE
# disregard all other valid correlation drivers and only care about the variables
# ie. distracting minutes, neutral minutes might correlate with whatever is the productivity driver
valid_index = [item for item in df_correlation_series.index if item not in self.valid_correlations]
# but still include the correlation driver to make sure that the correlation of a variable with itself is 1
# seeing a variable correlate with itself at exactly 1 is a reassuring check that the math isn't flawed
valid_index.append(correlation_driver)
filtered_correlation_series = df_correlation_series[valid_index]
filtered_correlation_series = filtered_correlation_series.sort_values(ascending=False)
return get_sorted_response(filtered_correlation_series)
class ProductivityLogsSupplementsCorrelationsView(CorrelationsAPIView):
dataframe_builder = AggregateSupplementProductivityDataframeBuilder
valid_correlations = PRODUCTIVITY_DRIVERS_LABELS
request_serializer = ProductivityRequestParamsSerializer
class ProductivityLogsUserActivitiesCorrelationsView(CorrelationsAPIView):
dataframe_builder = AggregateUserActivitiesEventsProductivityActivitiesBuilder
valid_correlations = PRODUCTIVITY_DRIVERS_LABELS
request_serializer = ProductivityRequestParamsSerializer
class SleepActivitiesUserActivitiesCorrelationsView(CorrelationsAPIView):
dataframe_builder = AggregateSleepActivitiesUserActivitiesBuilder
valid_correlations = [SLEEP_MINUTES_COLUMN]
request_serializer = SleepRequestParamsSerializer
class SleepActivitiesSupplementsCorrelationsView(CorrelationsAPIView):
dataframe_builder = AggregateSleepActivitiesSupplementsBuilder
valid_correlations = [SLEEP_MINUTES_COLUMN]
request_serializer = SleepRequestParamsSerializer
```
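The `CorrelationsAPIView` above multiplies the two lookback parameters so enough history is fetched, optionally rolling-sums it, and then trims back down to the correlation window before calling `.corr()`. A hedged sketch of just that pandas pipeline with synthetic column names and illustrative parameter values (the tests earlier exercise 60 and 1 as the defaults):
```python
import numpy as np
import pandas as pd

correlation_lookback = 60  # days included in the correlation
cumulative_lookback = 7    # days summed together before correlating
correlation_driver = 'Very Productive Minutes'

days_to_look_back = correlation_lookback * cumulative_lookback
index = pd.date_range(end='2017-06-30', periods=days_to_look_back, freq='D')

np.random.seed(0)
aggregate_dataframe = pd.DataFrame(
    np.random.rand(days_to_look_back, 3), index=index,
    columns=[correlation_driver, 'Caffeine', 'Theanine'])

if cumulative_lookback > 1:
    # min_periods=1 lets days without a full window of history still contribute
    aggregate_dataframe = aggregate_dataframe.rolling(cumulative_lookback, min_periods=1).sum()

# only correlate over the requested window so the summed periods don't overlap it
aggregate_dataframe = aggregate_dataframe[-correlation_lookback:]

correlation_series = aggregate_dataframe.corr()[correlation_driver].sort_values(ascending=False)
print(correlation_series)
```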
#### File: events/tests/test_serializers.py
```python
from django.test import TestCase
from rest_framework import serializers
from apis.betterself.v1.events.serializers import valid_daily_max_minutes, SupplementLogReadOnlySerializer
from events.fixtures.factories import SupplementEventFactory
from supplements.fixtures.factories import SupplementFactory
class TestSerializerUtils(TestCase):
@staticmethod
def test_regular_max_minutes():
valid_daily_max_minutes(600)
def test_more_than_daily_max_minutes(self):
with self.assertRaises(serializers.ValidationError):
valid_daily_max_minutes(3601)
def test_less_than_zero_max_minutes(self):
with self.assertRaises(serializers.ValidationError):
valid_daily_max_minutes(-50)
class TestSupplementEventSerializer(TestCase):
def test_supplement_serializer(self):
supplement = SupplementFactory(notes='gibberish')
event = SupplementEventFactory(supplement=supplement)
serializer = SupplementLogReadOnlySerializer(event)
dict_responses = serializer.data
self.assertEqual(dict_responses['uuid'], str(event.uuid))
self.assertEqual(dict_responses['notes'], event.notes)
self.assertEqual(dict_responses['quantity'], event.quantity)
```
#### File: tests/views/test_supplement_reminders.py
```python
from django.urls import reverse
from rest_framework.test import APIClient
from apis.betterself.v1.events.tests.views.test_views import User
from apis.betterself.v1.signup.fixtures.builders import DemoHistoricalDataBuilder
from apis.betterself.v1.tests.mixins.test_post_requests import PostRequestsTestsMixin
from apis.betterself.v1.tests.test_base import BaseAPIv1Tests
from events.models import SupplementReminder
from supplements.models import Supplement
class TestSupplementReminderViews(BaseAPIv1Tests, PostRequestsTestsMixin):
TEST_MODEL = SupplementReminder
PAGINATION = False
@classmethod
def setUpTestData(cls):
cls.user_1, _ = User.objects.get_or_create(username='default')
builder = DemoHistoricalDataBuilder(cls.user_1)
builder.create_historical_fixtures()
builder.create_supplement_reminders(limit=4)
cls.url = reverse(SupplementReminder.RESOURCE_NAME)
super().setUpTestData()
def setUp(self):
supplement = Supplement.objects.filter(user=self.user_1).first()
supplement_uuid = str(supplement.uuid)
self.DEFAULT_POST_PARAMS = {
'reminder_time': '15:20',
'quantity': 5,
'supplement_uuid': supplement_uuid
}
self.client_1 = self.create_authenticated_user_on_client(APIClient(), self.user_1)
self.client_2 = self.create_authenticated_user_on_client(APIClient(), self.user_2)
def test_post_when_over_limit(self):
# hardcoded value of 5 to prevent spam
supplements = Supplement.objects.filter(user=self.user_1)
for supplement in supplements:
params = {
'reminder_time': '10:20',
'quantity': 5,
'supplement_uuid': str(supplement.uuid)
}
self.client_1.post(self.url, data=params)
cutoff_limit = 5
user_supplement_reminders = SupplementReminder.objects.filter(user=self.user_1).count()
self.assertEqual(cutoff_limit, user_supplement_reminders)
def test_view_no_auth(self):
client = APIClient()
response = client.get(self.url)
self.assertEqual(response.status_code, 401)
def test_view_no_data(self):
new_user, _ = User.objects.get_or_create(username='no-data')
client = APIClient()
client.force_login(new_user)
response = client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_view(self):
response = self.client_1.get(self.url)
self.assertEqual(response.status_code, 200)
supplement_reminder_count = SupplementReminder.objects.filter(user=self.user_1).count()
self.assertEqual(supplement_reminder_count, len(response.data))
```
#### File: v1/mood/views.py
```python
from rest_framework.generics import ListCreateAPIView
from apis.betterself.v1.mood.filters import UserMoodLogFilter
from apis.betterself.v1.mood.serializers import MoodReadOnlySerializer, MoodCreateUpdateSerializer
from apis.betterself.v1.utils.views import ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin
from config.pagination import ModifiedPageNumberPagination
from events.models import UserMoodLog
class UserMoodViewSet(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
model = UserMoodLog
pagination_class = ModifiedPageNumberPagination
read_serializer_class = MoodReadOnlySerializer
write_serializer_class = MoodCreateUpdateSerializer
update_serializer_class = MoodCreateUpdateSerializer
filter_class = UserMoodLogFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
```
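This viewset, like most of the others in this API, defers serializer selection to `ReadOrWriteSerializerChooser`, whose source is not included in this document. A plausible minimal version of such a mixin, assuming it simply switches on the HTTP method, is sketched below; the project's real implementation may differ.
```python
class ReadOrWriteSerializerChooserSketch(object):
    """Hypothetical mixin: safe methods get the read serializer, everything else the write one."""

    READ_METHODS = ('GET', 'HEAD', 'OPTIONS')

    def _get_read_or_write_serializer_class(self):
        # self.request is provided by the DRF view this mixin is combined with
        if self.request.method in self.READ_METHODS:
            return self.read_serializer_class
        return self.write_serializer_class
```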
#### File: v1/signout/views.py
```python
from django.contrib.auth import logout
from django.views.generic import RedirectView
class SessionLogoutView(RedirectView):
"""
A view that will log a user out and redirect to the homepage.
"""
permanent = False
query_string = True
pattern_name = 'home'
def get_redirect_url(self, *args, **kwargs):
"""
Logout user and redirect to target url.
"""
if self.request.user.is_authenticated():
logout(self.request)
return super(SessionLogoutView, self).get_redirect_url(*args, **kwargs)
```
#### File: v1/signup/views.py
```python
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import IntegrityError
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.status import HTTP_201_CREATED
from rest_framework.views import APIView
from apis.betterself.v1.users.serializers import UserDetailsSerializer
from apis.betterself.v1.signup.tasks import create_demo_fixtures
from betterself.users.models import DemoUserLog
from config.settings.constants import TESTING
from events.utils.default_events_builder import DefaultEventsBuilder
from supplements.models import Supplement
User = get_user_model()
class CreateUserView(APIView):
# Limit the number of signups from any individual IP to 5 a day
# to prevent spam issues
throttle_scope = 'signups'
# If the user is just signing up, one would assume they can't have authentication yet ...
permission_classes = ()
def post(self, request):
serializer = UserDetailsSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
# For some signups, custom supplements are pre-filled for the new user
supplements = serializer.validated_data.pop('supplements')
try:
user = serializer.save()
except IntegrityError:
# if a user hits signup multiple times very very quickly, this causes issues ... when that happens
# return an empty response instead
return Response(status=400)
# if there's a custom set of supplements that were added along with signup
# this signup is coming programmatically and supplements should automatically be created
if supplements:
for supplement_name in supplements:
Supplement.objects.get_or_create(user=user, name=supplement_name)
else:
# build default events for new-users
builder = DefaultEventsBuilder(user)
builder.build_defaults()
token, _ = Token.objects.get_or_create(user=user)
json_response = serializer.data
json_response['token'] = token.key
return Response(json_response, status=HTTP_201_CREATED)
class CreateUserViewMobile(CreateUserView):
authentication_classes = (TokenAuthentication,)
class CreateDemoUserView(APIView):
"""
Used to create a demo user with preloaded fixtures to illustrate features
"""
throttle_scope = 'demo_signups'
# If the user is just signing up, one would assume they can't have authentication yet ...
permission_classes = ()
def get(self, request):
# Get the last DemoUserLog Created
last_demo_log = DemoUserLog.objects.all().order_by('created').last()
try:
# use that user to show a historical data sample
user = last_demo_log.user
except AttributeError:
# if attribute error, then that means demo-user-logs were cleared, so
# in that case, recreate a new demo user on the fly
create_demo_fixtures()
last_demo_log = DemoUserLog.objects.all().order_by('created').last()
user = last_demo_log.user
serializer = UserDetailsSerializer(instance=user)
response = serializer.data
token, _ = Token.objects.get_or_create(user=user)
response['token'] = token.key
# After the user has been chosen, kick off the more expensive Celery tasks so a fresh set of unique fixtures
# is ready for the next demo experience.
# Trying to generate that many fixtures synchronously in under 10 seconds is too slow and makes for a poor experience.
# in testing, immediately create the fixtures, otherwise send to celery
if settings.DJANGO_ENVIRONMENT == TESTING:
create_demo_fixtures()
else:
create_demo_fixtures.delay()
return Response(response, status=HTTP_201_CREATED)
```
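The `throttle_scope` attributes on the signup views only take effect when a scoped throttle class and matching rates are configured in the DRF settings. A hedged example of what that configuration might look like (the project's actual rate strings are not shown in this document; `'5/day'` simply mirrors the comment on `CreateUserView`, and the demo rate is made up):
```python
# illustrative settings snippet; the real rate values are an assumption
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_CLASSES': (
        'rest_framework.throttling.ScopedRateThrottle',
    ),
    'DEFAULT_THROTTLE_RATES': {
        'signups': '5/day',        # matches the "5 a day" comment on CreateUserView
        'demo_signups': '20/day',  # hypothetical rate for the demo endpoint
    },
}
```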
#### File: v1/sleep/tests.py
```python
import math
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from apis.betterself.v1.signup.fixtures.builders import DemoHistoricalDataBuilder
from events.models import SleepLog
User = get_user_model()
class SleepAggregateTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='demo')
builder = DemoHistoricalDataBuilder(cls.user)
builder.create_sleep_fixtures()
super().setUpTestData()
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_sleep_aggregates_view(self):
url = reverse('sleep-aggregates')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = response.data
first_date = min(data.keys())
first_record_sleep_time = data[first_date]
# check that the data from the view somewhat equals what the sleep duration should be
# this test isn't fully exact, but didn't want to rebuild a time hashing algorithm for a test
first_sleep_activity_record = SleepLog.objects.filter(user=self.user).order_by('start_time').first()
second_sleep_activity_record = SleepLog.objects.filter(user=self.user).order_by('start_time')[1]
# duration range
min_duration_minutes = first_sleep_activity_record.duration.seconds / 60
max_duration_minutes = math.ceil(min_duration_minutes + second_sleep_activity_record.duration.seconds / 60)
first_record_falls_within_range = min_duration_minutes <= first_record_sleep_time < max_duration_minutes
self.assertTrue(first_record_falls_within_range)
def test_sleep_aggregates_with_user_and_no_data(self):
url = reverse('sleep-aggregates')
user = User.objects.create(username='jack-in-box-2')
client = APIClient()
client.force_authenticate(user)
response = client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, {})
class SleepAverageTests(TestCase):
@classmethod
def setUpClass(cls):
cls.url = reverse('sleep-averages')
super().setUpClass()
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='demo')
builder = DemoHistoricalDataBuilder(cls.user)
builder.create_sleep_fixtures()
super().setUpTestData()
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_sleep_averages_view(self):
kwargs = {'lookback': 7}
response = self.client.get(self.url, data=kwargs)
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.data) > 1)
def test_sleep_averages_returns_400_with_invalid_lookback(self):
kwargs = {'lookback': 'cat'}
response = self.client.get(self.url, data=kwargs)
self.assertEqual(response.status_code, 400)
def test_sleep_averages_view_returns_200(self):
# if no params passed, should default to 1
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_sleep_averages_view_same_result_as_default_parameter(self):
# for responses with the default params, it should equal
# the same as something that didn't pass a lookback param
default_response = self.client.get(self.url)
kwargs = {'lookback': 1}
second_response = self.client.get(self.url, data=kwargs)
self.assertEqual(default_response.data, second_response.data)
def test_sleep_averages_view_not_same_result_as_default_parameter(self):
# for responses with different params, the result should not equal the default
default_response = self.client.get(self.url)
# default window is 1, so if this is 2, it should be different
kwargs = {'lookback': 2}
second_response = self.client.get(self.url, data=kwargs)
self.assertNotEqual(default_response.data, second_response.data)
def test_sleep_averages_with_not_logged_in_user(self):
client = APIClient()
response = client.get(self.url)
self.assertEqual(response.status_code, 401)
def test_sleep_averages_with_user_and_no_data(self):
user = User.objects.create(username='jack-in-box')
client = APIClient()
client.force_authenticate(user)
kwargs = {'lookback': 7}
response = client.get(self.url, data=kwargs)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, {})
```
#### File: v1/sleep/views.py
```python
import json
from django.utils.datastructures import MultiValueDictKeyError
from rest_framework.generics import ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from apis.betterself.v1.events.filters import SleepLogFilter
from apis.betterself.v1.events.serializers import SleepLogReadSerializer, SleepLogCreateSerializer
from analytics.events.utils.dataframe_builders import SleepActivityDataframeBuilder
from apis.betterself.v1.utils.views import ReadOrWriteSerializerChooser, UUIDDeleteMixin
from config.pagination import ModifiedPageNumberPagination
from constants import LOOKBACK_PARAM_NAME
from events.models import SleepLog
class SleepActivityView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin):
model = SleepLog
pagination_class = ModifiedPageNumberPagination
read_serializer_class = SleepLogReadSerializer
write_serializer_class = SleepLogCreateSerializer
filter_class = SleepLogFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
class SleepAggregatesView(APIView):
def get(self, request):
user = request.user
sleep_activities = SleepLog.objects.filter(user=user)
serializer = SleepActivityDataframeBuilder(sleep_activities)
sleep_aggregate = serializer.get_sleep_history_series()
# because pandas uses a DatetimeIndex, the series doesn't play nicely with a typical
# json dump, so serialize via pandas and then json.loads so DRF can transmit it cleanly
result = sleep_aggregate.to_json(date_format='iso')
result = json.loads(result)
return Response(data=result)
class SleepAveragesView(APIView):
def get(self, request):
try:
window = int(request.query_params[LOOKBACK_PARAM_NAME])
except MultiValueDictKeyError:
# MultiValueDictKeyError happens when a key doesn't exist
window = 1
except ValueError:
# ValueError if something entered for a window that couldn't be interpreted
return Response(status=400)
user = request.user
sleep_activities = SleepLog.objects.filter(user=user)
builder = SleepActivityDataframeBuilder(sleep_activities)
sleep_aggregate = builder.get_sleep_history_series()
sleep_average = sleep_aggregate.rolling(window=window, min_periods=1).mean()
result = sleep_average.to_json(date_format='iso')
result = json.loads(result)
return Response(data=result)
```
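`SleepAveragesView` above boils down to a rolling mean over the nightly series plus a JSON round-trip, because a pandas `DatetimeIndex` is not directly serializable by DRF. A minimal sketch of that pipeline with made-up sleep durations:
```python
import json

import pandas as pd

# hypothetical nightly sleep durations in minutes, indexed by date
index = pd.date_range('2017-03-01', periods=10, freq='D')
sleep_aggregate = pd.Series([470, 455, 510, 430, 480, 495, 460, 505, 440, 475], index=index)

# window mirrors the lookback query parameter; min_periods=1 keeps the earliest days
window = 7
sleep_average = sleep_aggregate.rolling(window=window, min_periods=1).mean()

# serialize through pandas' ISO-formatted JSON, then load it back so a plain
# dict (safe for DRF's Response) is what gets returned
result = json.loads(sleep_average.to_json(date_format='iso'))
print(result)
```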
#### File: supplements/tests/test_views.py
```python
from django.urls import reverse
from apis.betterself.v1.tests.mixins.test_delete_requests import DeleteRequestsTestsMixinV2
from apis.betterself.v1.tests.mixins.test_get_requests import GetRequestsTestsMixin, GetRequestsTestsMixinV2
from apis.betterself.v1.tests.mixins.test_post_requests import PostRequestsTestsMixin, PostRequestsTestsMixinV2
from apis.betterself.v1.tests.mixins.test_put_requests import PUTRequestsTestsMixin
from apis.betterself.v1.tests.test_base import BaseAPIv1Tests, BaseAPIv2Tests
from apis.betterself.v1.urls import API_V1_LIST_CREATE_URL
from events.fixtures.mixins import UserSupplementStackFixturesGenerator
from supplements.fixtures.factories import DEFAULT_INGREDIENT_NAME_1, UserSupplementStackFactory, SupplementFactory
from supplements.fixtures.mixins import SupplementModelsFixturesGenerator
from supplements.models import Supplement, IngredientComposition, Ingredient, Measurement, UserSupplementStack, \
UserSupplementStackComposition
from vendors.fixtures.factories import DEFAULT_VENDOR_NAME
from vendors.fixtures.mixins import VendorModelsFixturesGenerator
from vendors.models import Vendor
# I heavily dislike what you made here now, the inheritance is toooooo much.
class SupplementBaseTests(BaseAPIv1Tests):
# maybe debate this might be better as a template design pattern ...
# this inheritance chain is getting pretty long
@classmethod
def setUpTestData(cls):
# generic fixtures based on the apps, inclusive of all the models
# there, so supplement/models includes ingredients, etc.
SupplementModelsFixturesGenerator.create_fixtures()
VendorModelsFixturesGenerator.create_fixtures()
super().setUpTestData()
class VendorV1Tests(SupplementBaseTests, PostRequestsTestsMixin, GetRequestsTestsMixin):
TEST_MODEL = Vendor
DEFAULT_POST_PARAMS = {
'name': 'Poptarts',
'email': '<EMAIL>',
'url': 'cool.com',
}
def test_valid_get_request_with_params_filters_correctly(self):
request_parameters = {'name': DEFAULT_VENDOR_NAME}
super().test_valid_get_request_with_params_filters_correctly(request_parameters)
def test_valid_get_request_for_key_in_response(self):
key = 'name'
super().test_valid_get_request_for_key_in_response(key)
class MeasurementV1Tests(SupplementBaseTests, GetRequestsTestsMixin):
# measurements should be ONLY read-only
TEST_MODEL = Measurement
def test_valid_get_request_with_params_filters_correctly(self):
request_parameters = {'name': 'milligram'}
super().test_valid_get_request_with_params_filters_correctly(request_parameters)
def test_valid_get_request_for_key_in_response(self):
key = 'name'
super().test_valid_get_request_for_key_in_response(key)
def test_post_request(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = self.client_1.put(url)
# expected to fail, this is a read-only set of stuff
self.assertEqual(request.status_code, 405)
class IngredientV1Tests(SupplementBaseTests, GetRequestsTestsMixin, PostRequestsTestsMixin):
TEST_MODEL = Ingredient
DEFAULT_POST_PARAMS = {
'name': 'Advil',
'half_life_minutes': 30,
}
def test_valid_get_request_with_params_filters_correctly(self):
request_parameters = {'name': DEFAULT_INGREDIENT_NAME_1}
super().test_valid_get_request_with_params_filters_correctly(request_parameters)
def test_valid_get_request_for_key_in_response(self):
key = 'name'
super().test_valid_get_request_for_key_in_response(key)
class IngredientCompositionV1Tests(SupplementBaseTests, PostRequestsTestsMixin, GetRequestsTestsMixin):
TEST_MODEL = IngredientComposition
DEFAULT_POST_PARAMS = {
'quantity': 5,
}
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.DEFAULT_POST_PARAMS['ingredient_uuid'] = Ingredient.objects.all().first().uuid.__str__()
cls.DEFAULT_POST_PARAMS['measurement_uuid'] = Measurement.objects.all().first().uuid.__str__()
def test_valid_get_request_with_params_filters_correctly(self):
request_parameters = {'quantity': 1}
super().test_valid_get_request_with_params_filters_correctly(request_parameters)
def test_valid_get_request_for_key_in_response(self):
key = 'ingredient'
super().test_valid_get_request_for_key_in_response(key)
def test_valid_get_with_ingredient_uuid(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
# get the first valid data point
result = self.client_1.get(url).data[0]
result_ingredient_uuid = result['ingredient']['uuid']
parameters = {'ingredient_uuid': result_ingredient_uuid}
data = self.client_1.get(url, parameters).data
ingredient_uuids_found = [item['ingredient']['uuid'] for item in data]
ingredient_uuids_found = set(ingredient_uuids_found)
self.assertEqual(len(ingredient_uuids_found), 1)
self.assertEqual(result_ingredient_uuid, ingredient_uuids_found.pop())
def test_valid_get_with_measurement_uuid(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
# get the first valid data point
result = self.client_1.get(url).data[0]
result_measurement_uuid = result['measurement']['uuid']
parameters = {'measurement_uuid': result_measurement_uuid}
data = self.client_1.get(url, parameters).data
measurement_uuids_found = [item['measurement']['uuid'] for item in data]
measurement_uuids_found = set(measurement_uuids_found)
self.assertEqual(len(measurement_uuids_found), 1)
self.assertEqual(result_measurement_uuid, measurement_uuids_found.pop())
class SupplementV1Tests(SupplementBaseTests, GetRequestsTestsMixin, PostRequestsTestsMixin, PUTRequestsTestsMixin):
# python manage.py test apis.betterself.v1.supplements.tests.SupplementV1Tests
TEST_MODEL = Supplement
def _get_default_post_parameters(self):
# kind of whack, but create a list of valid IDs that could be passed
# when serializing
ingr_comps = IngredientComposition.objects.filter(user=self.user_1)
ingr_comps_uuids = ingr_comps.values_list('uuid', flat=True)
ingr_comps_uuids = [{'uuid': str(item)} for item in ingr_comps_uuids]
request_parameters = {
'name': 'Glutamine',
'ingredient_compositions': ingr_comps_uuids
}
return request_parameters
def test_default_parameters_recorded_correctly(self):
request_parameters = self._get_default_post_parameters()
self._make_post_request(self.client_1, request_parameters)
supplement = Supplement.objects.get(name=request_parameters['name'])
ingr_comps_uuids = supplement.ingredient_compositions.values_list('uuid', flat=True)
ingr_comps_uuids = set(str(uuid) for uuid in ingr_comps_uuids)
request_ingredient_compositions = request_parameters['ingredient_compositions']
request_ingredient_compositions_uuids = set(item['uuid'] for item in request_ingredient_compositions)
self.assertSetEqual(request_ingredient_compositions_uuids, ingr_comps_uuids)
def test_post_request(self):
request_parameters = self._get_default_post_parameters()
super().test_post_request(request_parameters)
def test_post_request_increments(self):
request_parameters = self._get_default_post_parameters()
super().test_post_request_increments(request_parameters)
def test_post_request_changes_objects_for_right_user(self):
request_parameters = self._get_default_post_parameters()
super().test_post_request_changes_objects_for_right_user(request_parameters)
def test_valid_get_request_with_params_filters_correctly(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = self.client_1.get(url)
data = request.data
first_name = data[0]['name']
request_parameters = {'name': first_name}
super().test_valid_get_request_with_params_filters_correctly(request_parameters)
def test_valid_get_request_for_key_in_response(self):
key = 'name'
super().test_valid_get_request_for_key_in_response(key)
def test_put_parameter_updates_supplement_name(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = self.client_1.get(url)
supplement_uuid = request.data[0]['uuid']
modified_supplement_name = 'Cheeseburgers'
data = {
'uuid': supplement_uuid,
'name': modified_supplement_name
}
response = self.client_1.put(url, data)
self.assertEqual(response.data['name'], modified_supplement_name)
# for good measure, let's send another request (this time via a GET) to make sure that it's updated correctly
uuid_filter = {'uuid': supplement_uuid}
response = self.client_1.get(url, uuid_filter)
self.assertEqual(response.data[0]['name'], modified_supplement_name)
def test_put_parameter_updates_ingredient_uuids_correctly(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = self.client_1.get(url)
supplement_uuid = request.data[0]['uuid']
supplement_ingredients = request.data[0]['ingredient_compositions']
supplement_ingredients_uuids = [item['uuid'] for item in supplement_ingredients]
# if the fixtures ever get modified / messed up, fixtures need to ensure this is greater than one
self.assertTrue(len(supplement_ingredients_uuids) > 1)
supplement_ingredients_uuid_to_use = supplement_ingredients_uuids[0]
data = {
'uuid': supplement_uuid,
'ingredient_compositions': [{'uuid': supplement_ingredients_uuid_to_use}]
}
response = self.client_1.put(url, data, format='json')
self.assertEqual(response.data['uuid'], supplement_uuid)
self.assertEqual(response.data['ingredient_compositions'][0]['uuid'], supplement_ingredients_uuid_to_use)
def test_put_parameter_when_ingredient_uuid_is_wrong(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = self.client_1.get(url)
supplement_uuid = request.data[0]['uuid']
supplement_ingredients = request.data[0]['ingredient_compositions']
supplement_ingredients_uuids = [item['uuid'] for item in supplement_ingredients]
# if the fixtures ever get modified / messed up, fixtures need to ensure this is greater than one
self.assertTrue(len(supplement_ingredients_uuids) > 1)
supplement_ingredients_uuid_to_use = supplement_ingredients_uuids[0]
data = {
'uuid': supplement_uuid,
# ingredient_compositions should be sent as a list of dictionaries, here we send it as a dictionary
'ingredient_compositions': {'uuid': supplement_ingredients_uuid_to_use}
}
response = self.client_1.put(url, data, format='json')
self.assertEqual(response.status_code, 400)
def test_get_request_work_uuid_filter_works_for_filtering_on_compositions(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = self.client_1.get(url)
supplement_ingredients = request.data[0]['ingredient_compositions']
supplement_ingredients_uuids = [item['uuid'] for item in supplement_ingredients]
supplement_ingredients_uuid = supplement_ingredients_uuids[0]
# filter on a composition to see if it returns back
uuid_filter_url = '{url}?ingredient_compositions_uuids={supplement_ingredients_uuid}'.format(
url=url, supplement_ingredients_uuid=supplement_ingredients_uuids[0])
uuid_request = self.client_1.get(uuid_filter_url)
self.assertEqual(uuid_request.status_code, 200)
length_of_compositions = len(uuid_request.data)
ingredient_composition = IngredientComposition.objects.filter(uuid=supplement_ingredients_uuid)
supplements_with_same_composition = Supplement.objects.filter(ingredient_compositions=ingredient_composition)
self.assertEqual(length_of_compositions, supplements_with_same_composition.count())
class SupplementStackV1Tests(SupplementBaseTests, GetRequestsTestsMixin, PostRequestsTestsMixin, PUTRequestsTestsMixin):
TEST_MODEL = UserSupplementStack
@classmethod
def setUpTestData(cls):
super().setUpTestData()
UserSupplementStackFixturesGenerator.create_fixtures(cls.user_1)
def _get_default_post_parameters(self):
# kind of whack, but create a list of valid IDs that could be passed
# when serializing
supplements = Supplement.objects.filter(user=self.user_1)
supplements_uuids = supplements.values_list('uuid', flat=True)
supplements_uuids = [{'supplement_uuid': str(item)} for item in supplements_uuids]
request_parameters = {
'name': 'Glutamine',
'compositions': supplements_uuids
}
return request_parameters
def test_post_request(self):
request_parameters = self._get_default_post_parameters()
super().test_post_request(request_parameters)
def test_post_request_increments(self):
request_parameters = self._get_default_post_parameters()
super().test_post_request_increments(request_parameters)
def test_post_request_changes_objects_for_right_user(self):
request_parameters = self._get_default_post_parameters()
super().test_post_request_changes_objects_for_right_user(request_parameters)
def test_valid_get_request_for_key_in_response(self):
key = 'name'
super().test_valid_get_request_for_key_in_response(key)
def test_valid_get_request_with_params_filters_correctly(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = self.client_1.get(url)
data = request.data
first_name = data[0]['name']
request_parameters = {'name': first_name}
super().test_valid_get_request_with_params_filters_correctly(request_parameters)
class UserSupplementStackCompositionViewsetTestsV2(BaseAPIv2Tests, GetRequestsTestsMixinV2, PostRequestsTestsMixinV2,
PUTRequestsTestsMixin, DeleteRequestsTestsMixinV2):
TEST_MODEL = UserSupplementStackComposition
PAGINATION = False
username_1 = 'jack'
username_2 = 'sarah'
required_response_keys = ['supplement']
filterable_keys = ['uuid']
@staticmethod
def _get_post_parameters(user):
stack = UserSupplementStackFactory(user=user)
supplement = SupplementFactory(user=user)
data = {
'stack_uuid': str(stack.uuid),
'supplement_uuid': str(supplement.uuid)
}
return data
@classmethod
def setUpTestData(cls):
super().setUpTestData()
UserSupplementStackFixturesGenerator.create_fixtures(cls.user_1)
def test_delete_composition_wont_delete_stack(self):
""" That moment of fear when you want to be absolutely certain your cascades are set correctly """
supplement_composition = UserSupplementStackComposition.objects.filter(user=self.user_1).first()
supplement_composition_uuid = supplement_composition.uuid
stack_uuid = supplement_composition.stack.uuid
data = {
'uuid': supplement_composition_uuid
}
response = self.client_1.delete(self.url, data=data)
self.assertEqual(response.status_code, 204)
stack_still_exists = UserSupplementStack.objects.filter(uuid=stack_uuid, user=self.user_1).exists()
self.assertTrue(stack_still_exists)
def test_delete_supplement_wont_delete_stack(self):
supplement_composition = UserSupplementStackComposition.objects.filter(user=self.user_1).first()
supplement_uuid = supplement_composition.supplement.uuid
stack_uuid = supplement_composition.stack.uuid
data = {
'uuid': supplement_uuid
}
url = reverse(Supplement.RESOURCE_NAME)
response = self.client_1.delete(url, data=data)
self.assertEqual(response.status_code, 204)
stack_still_exists = UserSupplementStack.objects.filter(uuid=stack_uuid, user=self.user_1).exists()
self.assertTrue(stack_still_exists)
```
#### File: v1/supplements/views.py
```python
from django.db.models.query import Prefetch
from rest_framework.generics import ListAPIView, ListCreateAPIView
from apis.betterself.v1.supplements.filters import IngredientCompositionFilter, SupplementFilter, \
UserSupplementStackFilter, UserSupplementStackCompositionFilter
from apis.betterself.v1.supplements.serializers import IngredientCompositionReadOnlySerializer, \
SupplementCreateUpdateSerializer, MeasurementReadOnlySerializer, IngredientSerializer, VendorSerializer, \
SupplementReadSerializer, IngredientCompositionCreateSerializer, UserSupplementStackReadSerializer, \
UserSupplementStackCreateUpdateSerializer, UserSupplementStackCompositionCreateUpdateSerializer, \
UserSupplementStackCompositionReadSerializer
from apis.betterself.v1.utils.views import ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin
from supplements.models import Ingredient, IngredientComposition, Measurement, Supplement, UserSupplementStack, \
UserSupplementStackComposition
from vendors.models import Vendor
"""
These inherited views such as BaseGenericListCreateAPIViewV1 contain an override to get_queryset
so that users won't have access to models that are not the default or don't belong to them!
"""
class VendorView(ListCreateAPIView):
serializer_class = VendorSerializer
model = Vendor
filter_fields = ('name', 'uuid')
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
class MeasurementView(ListAPIView):
# Users are not allowed to create measurements; they can only choose
# from the defaults
serializer_class = MeasurementReadOnlySerializer
model = Measurement
filter_fields = ('name', 'uuid')
queryset = Measurement.objects.all()
class IngredientView(ListCreateAPIView):
serializer_class = IngredientSerializer
model = Ingredient
filter_fields = ('name', 'half_life_minutes', 'uuid')
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
class IngredientCompositionView(ListCreateAPIView, ReadOrWriteSerializerChooser):
read_serializer_class = IngredientCompositionReadOnlySerializer
write_serializer_class = IngredientCompositionCreateSerializer
model = IngredientComposition
filter_class = IngredientCompositionFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
class SupplementsListView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
read_serializer_class = SupplementReadSerializer
write_serializer_class = SupplementCreateUpdateSerializer
update_serializer_class = SupplementCreateUpdateSerializer
model = Supplement
filter_class = SupplementFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
# prefetch any compositions that exist to speed this up
ingredient_compositions_queryset = IngredientComposition.objects.filter(user=self.request.user)
return self.model.objects.filter(user=self.request.user).prefetch_related(Prefetch('ingredient_compositions',
ingredient_compositions_queryset))
class UserSupplementStackViewSet(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
model = UserSupplementStack
write_serializer_class = UserSupplementStackCreateUpdateSerializer
read_serializer_class = UserSupplementStackReadSerializer
update_serializer_class = UserSupplementStackCreateUpdateSerializer
filter_class = UserSupplementStackFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
return self.model.objects.filter(user=self.request.user).prefetch_related('compositions')
class UserSupplementStackCompositionViewSet(
ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin
):
model = UserSupplementStackComposition
write_serializer_class = UserSupplementStackCompositionCreateUpdateSerializer
read_serializer_class = UserSupplementStackCompositionReadSerializer
update_serializer_class = UserSupplementStackCompositionCreateUpdateSerializer
filter_class = UserSupplementStackCompositionFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
return self.model.objects.filter(user=self.request.user).select_related('supplement')
```
#### File: tests/mixins/test_put_requests.py
```python
import numbers
from dateutil import parser
from uuid import uuid4
from apis.betterself.v1.tests.test_base import GenericRESTMethodMixin
from apis.betterself.v1.urls import API_V1_LIST_CREATE_URL
class PUTRequestsTestsMixin(GenericRESTMethodMixin):
def _get_initial_data(self, data=None):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
if data:
request = self.client_1.get(url, data=data)
else:
request = self.client_1.get(url)
if self.PAGINATION:
data = request.data['results']
else:
data = request.data
return data
def test_put_empty_data_returns_404(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
result = self.client_1.put(url)
self.assertEqual(result.status_code, 404)
def test_put_requests_with_booleans(self):
data = self._get_initial_data()
# take the first object and update something within it
initial_result = data[0]
uuid = initial_result.pop('uuid')
# make a copied result to update with new parameters
copied_result = initial_result.copy()
# get only the boolean params from the response
copied_result = {k: v for k, v in copied_result.items()
if isinstance(v, bool)}
# collect the boolean attributes that will be updated
attributes_to_update = list(copied_result.keys())
for bool_update in [True, False]:
bool_update_parameter = bool_update
for attribute in attributes_to_update:
copied_result[attribute] = bool_update_parameter
# now add uuid back since that's the one value that should be immutable
copied_result['uuid'] = uuid
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
result = self.client_1.put(url, data=copied_result, format='json')
for attribute in attributes_to_update:
self.assertEqual(result.data[attribute], bool_update_parameter)
# now, for good measure, let's use a GET to retrieve the same object via UUID
get_response = self._get_initial_data(data={'uuid': uuid})
second_result = get_response[0]
for attribute in attributes_to_update:
self.assertEqual(second_result[attribute], bool_update_parameter)
def test_put_request_with_numbers(self):
data = self._get_initial_data()
# take the first object and update something within it
initial_result = data[0]
uuid = initial_result.pop('uuid')
# make a copied result to update with new parameters
copied_result = initial_result.copy()
# get only numbers params, but make sure that the numbers don't include true/false
copied_result = {k: v for k, v in copied_result.items()
if isinstance(v, numbers.Real)
and not isinstance(v, bool)}
# collect the numeric attributes that will be updated
attributes_to_update = list(copied_result.keys())
for number_to_try in [5, 10.0]:
number_update_param = number_to_try
for attribute in attributes_to_update:
copied_result[attribute] = number_update_param
# now add uuid back since that's the one value that should be immutable
copied_result['uuid'] = uuid
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
result = self.client_1.put(url, data=copied_result, format='json')
for attribute in attributes_to_update:
self.assertEqual(result.data[attribute], number_update_param, result.data)
# now, for good measure, let's use a GET to retrieve the same object via UUID
get_response = self._get_initial_data(data={'uuid': uuid})
second_result = get_response[0]
for attribute in attributes_to_update:
self.assertEqual(second_result[attribute], number_update_param)
def test_put_request_updates_for_strings(self):
"""
This test is literally garbage now that I come back and look at this.
"""
data = self._get_initial_data()
# take the first object and update something within it
initial_result = data[0]
uuid = initial_result.pop('uuid')
# make a copied result to update with new parameters
copied_result = initial_result.copy()
# don't update anything that's a list or a dictionary
# also include an ignore list where certain attributes are read-only
readonly_parameters = ['supplement_name', 'supplement_uuid', 'description']
copied_result = {k: v for k, v in copied_result.items() if isinstance(v, str) and k not in readonly_parameters}
# for any string results, update them to a constant ("api"), since we know that's accepted in
# tuple validation
attributes_to_update = list(copied_result.keys())
string_update_param = 'api'
for attribute in attributes_to_update:
try:
parser.parse(copied_result[attribute])
# don't update datetime variables
copied_result.pop(attribute)
continue
except ValueError:
pass
copied_result[attribute] = string_update_param
# since we removed a few attributes that should no longer be updated, refresh this list
attributes_to_update = list(copied_result.keys())
# now add uuid back since that's the one value that should be immutable
copied_result['uuid'] = uuid
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
result = self.client_1.put(url, data=copied_result, format='json')
for attribute in attributes_to_update:
# we ignore the notes field since it can be dynamically generated
if attribute == 'notes':
continue
self.assertEqual(result.data[attribute], string_update_param)
# now, for good measure, let's use a GET to retrieve the same object via UUID
get_response = self._get_initial_data(data={'uuid': uuid})
second_result = get_response[0]
for attribute in attributes_to_update:
# we ignore the notes field since it can be dynamically generated
if attribute == 'notes':
continue
self.assertEqual(second_result[attribute], string_update_param)
def test_put_request_with_invalid_uuid_will_fail(self):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
uuid = uuid4()
crap_response = self.client_1.put(url, {'uuid': uuid})
self.assertEqual(crap_response.status_code, 404)
```
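The mixin above relies on attributes that a concrete test case must supply, such as `TEST_MODEL`, `PAGINATION`, and an authenticated `client_1`. A hypothetical wiring is sketched below; `BaseAPIv1TestCase` and its behavior are assumptions for illustration, not names taken from this repository, and the relevant imports are omitted.
```python
# hypothetical example: BaseAPIv1TestCase is assumed to create fixtures and an
# authenticated self.client_1; only the mixin and the class attributes it reads are real
class SupplementPUTRequestsTest(BaseAPIv1TestCase, PUTRequestsTestsMixin):
    TEST_MODEL = Supplement   # exposes RESOURCE_NAME, used to build the endpoint URL
    PAGINATION = True         # the list endpoint returns paginated results
```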
#### File: rescuetime/v1/serializers.py
```python
from rest_framework import serializers
class RescueTimeAPIRequestSerializer(serializers.Serializer):
rescuetime_api_key = serializers.CharField(max_length=200)
# add a check to make sure end_date is greater than start_date
start_date = serializers.DateField()
end_date = serializers.DateField()
def validate(self, data):
"""
Check that the start is before the stop.
"""
if data['start_date'] > data['end_date']:
raise serializers.ValidationError('Finish must occur after start')
# Cap the requested date range to be nice to RescueTime's servers
days_difference = data['end_date'] - data['start_date']
if days_difference.days > 370:
raise serializers.ValidationError('Start and end dates must be within 370 days')
return data
```
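As a quick illustration of the date-range check in `validate()`, here is a minimal sketch of exercising the serializer directly (it assumes Django settings are configured, as for any DRF serializer; the API key is a placeholder):
```python
# a start_date after end_date should be rejected by validate()
from apis.rescuetime.v1.serializers import RescueTimeAPIRequestSerializer

serializer = RescueTimeAPIRequestSerializer(data={
    'rescuetime_api_key': 'example-key',   # placeholder key
    'start_date': '2017-02-01',
    'end_date': '2017-01-01',
})
assert not serializer.is_valid()
assert 'Finish must occur after start' in str(serializer.errors)
```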
#### File: rescuetime/v1/views.py
```python
from django.utils.datastructures import MultiValueDictKeyError
from rest_framework.response import Response
from rest_framework.views import APIView
from apis.rescuetime.tasks import import_user_rescuetime_history_via_api
from apis.rescuetime.v1.serializers import RescueTimeAPIRequestSerializer
class UpdateRescueTimeAPIView(APIView):
# don't slam rescuetime's servers, so you won't get banned
throttle_scope = 'rescuetime-api-sync'
def post(self, request):
user = request.user
data = request.data
try:
initial_data = {
'rescuetime_api_key': data['rescuetime_api_key'],
'start_date': data['start_date'],
'end_date': data['end_date'],
}
except (MultiValueDictKeyError, KeyError) as exc:
return Response('Missing POST parameters {}'.format(exc), status=400)
serializer = RescueTimeAPIRequestSerializer(data=initial_data)
serializer.is_valid(raise_exception=True)
# send the job off to celery so it's an async task
import_user_rescuetime_history_via_api.delay(user=user, **serializer.validated_data)
return Response(status=202)
```
#### File: betterself/utils/django_utils.py
```python
def create_django_choice_tuple_from_list(list_a):
if list_a is None:
return ()
tuples_list = []
for item in list_a:
if isinstance(item, str):
tuple_item_title = item.title()
else:
tuple_item_title = item
tuple_item = (item, tuple_item_title)
tuples_list.append(tuple_item)
return tuple(tuples_list)
```
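For example, the helper title-cases string items for the display value and passes non-strings through unchanged:
```python
from betterself.utils.django_utils import create_django_choice_tuple_from_list

# strings get a title-cased display value; other types are used verbatim
choices = create_django_choice_tuple_from_list(['api', 'web', 3])
assert choices == (('api', 'Api'), ('web', 'Web'), (3, 3))
assert create_django_choice_tuple_from_list(None) == ()
```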
#### File: events/fixtures/mixins.py
```python
import datetime
import itertools
from pytz import timezone
from events.fixtures.factories import SupplementEventFactory, DailyProductivityLogFactory, UserActivityFactory, \
UserActivityEventFactory
from events.models import INPUT_SOURCES
from supplements.fixtures.factories import SupplementFactory
from supplements.models import UserSupplementStack, UserSupplementStackComposition
VALID_QUANTITIES = range(1, 30)
STATIC_DATE = datetime.datetime(2016, 12, 31)
eastern_tz = timezone('US/Eastern')
STATIC_DATE = eastern_tz.localize(STATIC_DATE)
GENERATED_DATES = [STATIC_DATE + datetime.timedelta(days=x) for x in range(0, 30)]
def generate_test_cases_for_events():
"""
Generate all of the test cases by taking the Cartesian product of the valid quantities and input sources
"""
test_cases = itertools.product(VALID_QUANTITIES, INPUT_SOURCES)
return test_cases
def generate_unique_index_per_supplements_and_time(supplements_used):
unique_index = itertools.product(supplements_used, GENERATED_DATES)
return unique_index
class SupplementEventsFixturesGenerator(object):
@staticmethod
def create_fixtures(user):
supplement_1 = SupplementFactory(user=user, name='Fish Oil')
supplement_2 = SupplementFactory(user=user, name='Snake Oil')
supplement_3 = SupplementFactory(user=user, name='Truffle Oil')
supplements_used = [supplement_1, supplement_2, supplement_3]
# test the unique index, where the database constrains each supplement/time pair to one record
unique_index = generate_unique_index_per_supplements_and_time(supplements_used)
# test random non-unique identifiers like quantity of 50 and different input sources
test_cases = generate_test_cases_for_events()
for supplement, event_time in unique_index:
# pull the next (quantity, input_source) pair from the generator
# we can't use a nested for loop because of the unique constraint on the table;
# here we only consume the next possible quantity/input combination once
# rather than once per iteration of an inner loop
#
# TODO - Refactor your stupidity
quantity, input_source = next(test_cases)
SupplementEventFactory(quantity=quantity, source=input_source,
time=event_time, user=user, supplement=supplement)
class UserSupplementStackFixturesGenerator(object):
@staticmethod
def create_fixtures(user):
supplement_1 = SupplementFactory(user=user, name='Fish Oil')
supplement_2 = SupplementFactory(user=user, name='Snake Oil')
supplement_3 = SupplementFactory(user=user, name='Truffle Oil')
first_stack = UserSupplementStack.objects.create(user=user, name='first_stack')
UserSupplementStackComposition.objects.create(stack=first_stack, user=user, supplement=supplement_1)
UserSupplementStackComposition.objects.create(stack=first_stack, user=user, supplement=supplement_2)
second_stack = UserSupplementStack.objects.create(user=user, name='second_stack')
UserSupplementStackComposition.objects.create(stack=second_stack, user=user, supplement=supplement_1)
UserSupplementStackComposition.objects.create(stack=second_stack, user=user, supplement=supplement_2)
UserSupplementStackComposition.objects.create(stack=second_stack, user=user, supplement=supplement_3)
class ProductivityLogFixturesGenerator(object):
@staticmethod
def create_fixtures(user, days_fwd_amt=35):
start_date = datetime.date(2016, 12, 15)
for days_fwd in range(days_fwd_amt):
fixture_date = start_date + datetime.timedelta(days=days_fwd)
DailyProductivityLogFactory(user=user, date=fixture_date)
@staticmethod
def create_fixtures_starting_from_today(user, periods_back):
end_date = datetime.date.today()
for days_back in range(periods_back):
fixture_date = end_date - datetime.timedelta(days=days_back)
DailyProductivityLogFactory(user=user, date=fixture_date)
class UserActivityEventFixturesGenerator(object):
@staticmethod
def create_fixtures(user):
activity_names = [
'Ran',
'Bike',
'Swim',
'Eat',
'Sleep',
'Fight Crime'
]
for activity in activity_names:
user_activity = UserActivityFactory(user=user, name=activity)
UserActivityEventFactory(user_activity=user_activity, user=user)
```
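A minimal sketch of driving these generators from a test setup, assuming any Django user instance is available (the username and password below are placeholders):
```python
# sketch only: the generators create the supplements, logs and events for the given user
from django.contrib.auth import get_user_model
from events.fixtures.mixins import (SupplementEventsFixturesGenerator,
                                    ProductivityLogFixturesGenerator)

user = get_user_model().objects.create_user(username='demo-user', password='demo-pass')
SupplementEventsFixturesGenerator.create_fixtures(user)
ProductivityLogFixturesGenerator.create_fixtures(user, days_fwd_amt=10)
```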
#### File: betterself/supplements/admin.py
```python
from django.contrib import admin
from supplements.models import Measurement, Ingredient, IngredientComposition, Supplement, UserSupplementStack
@admin.register(Measurement)
class MeasurementAdmin(admin.ModelAdmin):
list_display = ('name', 'short_name', 'is_liquid')
search_fields = ('short_name', 'name')
class Meta:
model = Measurement
@admin.register(Ingredient)
class IngredientAdmin(admin.ModelAdmin):
list_display = ('user', 'name', 'half_life_minutes')
search_fields = ('user__username', 'name')
class Meta:
model = Ingredient
@admin.register(IngredientComposition)
class IngredientCompositionAdmin(admin.ModelAdmin):
list_display = ('user', 'ingredient_name', 'measurement', 'quantity')
search_fields = ('user__username', 'name')
class Meta:
model = IngredientComposition
@staticmethod
def ingredient_name(instance):
return instance.ingredient.name
@admin.register(Supplement)
class SupplementAdmin(admin.ModelAdmin):
list_display = ('user', 'name', 'ingredient_composition_display', )
search_fields = ('user__username', 'name')
class Meta:
model = Supplement
@staticmethod
def ingredient_composition_display(instance):
ingredient_composition = instance.ingredient_compositions.all()
if ingredient_composition.exists():
return ingredient_composition.values_list('ingredient__name', flat=True)
@admin.register(UserSupplementStack)
class UserSupplementStackAdmin(admin.ModelAdmin):
list_display = ('user', 'name', 'compositions_display', )
class Meta:
model = UserSupplementStack
@staticmethod
def compositions_display(instance):
compositions = instance.compositions.all()
if compositions.exists():
return compositions.values_list('supplement__name', flat=True)
```
#### File: supplements/migrations/0001_squashed_0007_auto_20171023_0402.py
```python
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
def create_measurement_data(apps, schema_editor):
app = apps.get_model('supplements', 'Measurement')
# pycharm is amazing.
dict_list = [
{
'name': 'milligram',
'short_name': 'mg',
'is_liquid': False,
},
{
'name': 'gram',
'short_name': 'g',
'is_liquid': False,
},
{
'name': 'milliliter',
'short_name': 'ml',
'is_liquid': True,
},
{
'name': 'liter',
'short_name': 'l',
'is_liquid': True,
},
{
'name': 'pill',
'short_name': 'pill',
'is_liquid': False,
},
]
for measurement in dict_list:
app.objects.create(**measurement)
class Migration(migrations.Migration):
replaces = [('supplements', '0001_initial'), ('supplements', '0002_auto_20170605_1603'), ('supplements', '0003_data_migrations_for_measurements'), ('supplements', '0004_auto_20171019_0120'), ('supplements', '0005_adjust_supplement_stack_to_include_quantity'), ('supplements', '0006_auto_20171022_0700'), ('supplements', '0007_auto_20171023_0402')]
initial = True
dependencies = [
('vendors', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=300)),
('half_life_minutes', models.PositiveIntegerField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='IngredientComposition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('quantity', models.FloatField(default=1)),
('ingredient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplements.Ingredient')),
],
options={
'verbose_name': 'Ingredient Composition',
'ordering': ['user', 'ingredient__name'],
'verbose_name_plural': 'Ingredient Compositions',
},
),
migrations.CreateModel(
name='Measurement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=100)),
('short_name', models.CharField(blank=True, max_length=100, null=True)),
('is_liquid', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Supplement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=300)),
('ingredient_compositions', models.ManyToManyField(blank=True, to='supplements.IngredientComposition')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Supplement',
'ordering': ['user', 'name'],
'verbose_name_plural': 'Supplements',
},
),
migrations.AddField(
model_name='ingredientcomposition',
name='measurement',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='supplements.Measurement'),
),
migrations.AddField(
model_name='ingredientcomposition',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='supplement',
unique_together=set([('user', 'name')]),
),
migrations.AlterField(
model_name='ingredientcomposition',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterUniqueTogether(
name='ingredientcomposition',
unique_together=set([('user', 'ingredient', 'measurement', 'quantity')]),
),
migrations.AlterField(
model_name='ingredient',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterUniqueTogether(
name='ingredient',
unique_together=set([('name', 'user')]),
),
migrations.RunPython(
code=create_measurement_data,
),
migrations.CreateModel(
name='UserSupplementStack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=300)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Supplements Stack',
'ordering': ['user', 'name'],
'verbose_name_plural': 'Supplements Stacks',
},
),
migrations.AlterUniqueTogether(
name='usersupplementstack',
unique_together=set([('user', 'name')]),
),
migrations.CreateModel(
name='UserSupplementStackComposition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('quantity', models.FloatField(default=1)),
('stack', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplements.UserSupplementStack')),
('supplement', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supplements.Supplement')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='usersupplementstackcomposition',
unique_together=set([('supplement', 'stack')]),
),
migrations.AlterField(
model_name='usersupplementstackcomposition',
name='stack',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='compositions', to='supplements.UserSupplementStack'),
),
migrations.AlterUniqueTogether(
name='usersupplementstackcomposition',
unique_together=set([('user', 'supplement', 'stack')]),
),
]
```
|
{
"source": "jeffshek/pyvips",
"score": 2
}
|
#### File: pyvips/pyvips/__init__.py
```python
import logging
import os
import sys
import atexit
logger = logging.getLogger(__name__)
# user code can override this null handler
logger.addHandler(logging.NullHandler())
# pull in our module version number, see also setup.py
from .version import __version__
# try to import our binary interface ... if that works, we are in API mode
API_mode = False
try:
import _libvips
logger.debug('Loaded binary module _libvips')
ffi = _libvips.ffi
vips_lib = _libvips.lib
glib_lib = _libvips.lib
gobject_lib = _libvips.lib
# now check that the binary wrapper is for the same version of libvips that
# we find ourselves linking to at runtime ... if it isn't, we must fall
# back to ABI mode
lib_major = vips_lib.vips_version(0)
lib_minor = vips_lib.vips_version(1)
wrap_major = vips_lib.VIPS_MAJOR_VERSION
wrap_minor = vips_lib.VIPS_MINOR_VERSION
if wrap_major != lib_major or wrap_minor != lib_minor:
logger.debug('Binary module was generated for libvips %s.%s '
'but you are running against libvips %s.%s',
wrap_major, wrap_minor, lib_major, lib_minor)
raise Exception('bad wrapper version')
API_mode = True
except Exception as e:
logger.debug('Binary module load failed: %s' % e)
logger.debug('Falling back to ABI mode')
from cffi import FFI
ffi = FFI()
_is_windows = os.name == 'nt'
_is_mac = sys.platform == 'darwin'
# yuk
if _is_windows:
_glib_libname = 'libglib-2.0-0.dll'
_gobject_libname = 'libgobject-2.0-0.dll'
_vips_libname = 'libvips-42.dll'
elif _is_mac:
_glib_libname = None
_vips_libname = 'libvips.42.dylib'
_gobject_libname = 'libgobject-2.0.dylib'
else:
_glib_libname = None
_vips_libname = 'libvips.so.42'
_gobject_libname = 'libgobject-2.0.so.0'
# possibly use ctypes.util.find_library() to locate the lib?
gobject_lib = ffi.dlopen(_gobject_libname)
vips_lib = ffi.dlopen(_vips_libname)
if _glib_libname:
glib_lib = ffi.dlopen(_glib_libname)
else:
glib_lib = gobject_lib
logger.debug('Loaded lib %s', vips_lib)
logger.debug('Loaded lib %s', gobject_lib)
ffi.cdef('''
int vips_init (const char* argv0);
int vips_version (int flag);
''')
if vips_lib.vips_init(sys.argv[0].encode()) != 0:
raise Exception('unable to init libvips')
logger.debug('Inited libvips')
if not API_mode:
from pyvips import decls
major = vips_lib.vips_version(0)
minor = vips_lib.vips_version(1)
micro = vips_lib.vips_version(2)
features = {
'major': major,
'minor': minor,
'micro': micro,
'api': False,
}
ffi.cdef(decls.cdefs(features))
from .error import *
# redirect all vips warnings to logging
class GLogLevelFlags(object):
# log flags
FLAG_RECURSION = 1 << 0
FLAG_FATAL = 1 << 1
# GLib log levels
LEVEL_ERROR = 1 << 2 # always fatal
LEVEL_CRITICAL = 1 << 3
LEVEL_WARNING = 1 << 4
LEVEL_MESSAGE = 1 << 5
LEVEL_INFO = 1 << 6
LEVEL_DEBUG = 1 << 7
LEVEL_TO_LOGGER = {
LEVEL_DEBUG : 10,
LEVEL_INFO : 20,
LEVEL_MESSAGE : 20,
LEVEL_WARNING : 30,
LEVEL_ERROR : 40,
LEVEL_CRITICAL : 50,
}
def _log_handler(domain, level, message, user_data):
logger.log(GLogLevelFlags.LEVEL_TO_LOGGER[level],
'{0}: {1}'.format(_to_string(domain), _to_string(message)))
# keep a ref to the cb to stop it being GCd
_log_handler_cb = ffi.callback('GLogFunc', _log_handler)
_log_handler_id = glib_lib.g_log_set_handler(_to_bytes('VIPS'),
GLogLevelFlags.LEVEL_DEBUG |
GLogLevelFlags.LEVEL_INFO |
GLogLevelFlags.LEVEL_MESSAGE |
GLogLevelFlags.LEVEL_WARNING |
GLogLevelFlags.LEVEL_CRITICAL |
GLogLevelFlags.LEVEL_ERROR |
GLogLevelFlags.FLAG_FATAL |
GLogLevelFlags.FLAG_RECURSION,
_log_handler_cb, ffi.NULL)
# ffi doesn't like us looking up methods during shutdown: make a note of the
# remove handler here
_remove_handler = glib_lib.g_log_remove_handler
# we must remove the handler on exit or libvips may try to run the callback
# during shutdown
def _remove_log_handler():
global _log_handler_id
global _remove_handler
if _log_handler_id:
_remove_handler(_to_bytes('VIPS'), _log_handler_id)
_log_handler_id = None
atexit.register(_remove_log_handler)
from .enums import *
from .base import *
from .gobject import *
from .gvalue import *
from .vobject import *
from .vinterpolate import *
from .voperation import *
from .vimage import *
__all__ = [
'Error', 'Image', 'Operation', 'GValue', 'Interpolate', 'GObject',
'VipsObject', 'type_find', 'type_name', 'version', '__version__',
'at_least_libvips', 'API_mode',
'cache_set_max', 'cache_set_max_mem', 'cache_set_max_files',
]
```
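Because the package installs a `NullHandler` and forwards libvips messages to the `pyvips` logger, user code only needs to configure logging to see them; a minimal sketch (the filename is a placeholder):
```python
import logging
import pyvips

# make pyvips (and therefore redirected libvips) warnings visible on the console
logging.basicConfig(level=logging.WARNING)

image = pyvips.Image.new_from_file('example.jpg')  # placeholder file; warnings now appear
print(image.width, image.height)
```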
|
{
"source": "jeff-shepherd/MachineLearningNotebooks",
"score": 3
}
|
#### File: training-with-deep-learning/how-to-use-estimator/dummy_train.py
```python
import argparse
print("*********************************************************")
print("Hello Azure ML!")
parser = argparse.ArgumentParser()
parser.add_argument('--numbers-in-sequence', type=int, dest='num_in_sequence', default=10,
help='number of fibonacci numbers in sequence')
# This is how you can use a bool argument in Python. If you want the 'my_bool_var' to be True, just pass it
# in the Estimator's script_params as script_params={'my_bool_var': ''}.
# And, if you want to use it as False, then do not pass it in the Estimator's script_params.
# You can reverse the behavior by setting action='store_false' in the next line.
parser.add_argument("--my_bool_var", action='store_true')
args = parser.parse_args()
num = args.num_in_sequence
my_bool_var = args.my_bool_var
def fibo(n):
if n < 2:
return n
else:
return fibo(n - 1) + fibo(n - 2)
try:
from azureml.core import Run
run = Run.get_context()
print("The value of boolean parameter 'my_bool_var' is {}".format(my_bool_var))
print("Log Fibonacci numbers.")
for i in range(0, num - 1):
run.log('Fibonacci numbers', fibo(i))
run.complete()
except:
print("Warning: you need to install Azure ML SDK in order to log metrics.")
print("*********************************************************")
```
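The boolean flag behaves exactly as the comments describe: it is `True` only when the flag is present on the command line. A self-contained sketch of that argparse behavior:
```python
import argparse

# mirror of the two arguments defined in the training script
parser = argparse.ArgumentParser()
parser.add_argument('--numbers-in-sequence', type=int, dest='num_in_sequence', default=10)
parser.add_argument('--my_bool_var', action='store_true')

print(parser.parse_args([]).my_bool_var)                   # False - flag omitted
print(parser.parse_args(['--my_bool_var']).my_bool_var)    # True  - flag present
```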
|
{
"source": "jeffshih/autoTrain",
"score": 2
}
|
#### File: autoTrain/tools/rc2.py
```python
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
import datasets.imdb
from datasets.coco import coco
from datasets.vatic import VaticData, IMDBGroup
from datasets.pascal_voc_new import pascal_voc
import caffe
import argparse
import pprint
import numpy as np
import sys
import os
def combined_roidb(imdb):
def get_roidb(imdb):
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
roidb = get_training_roidb(imdb)
return roidb
roidb = get_roidb(imdb)
return imdb, roidb
def prepare_data():
#Set the training configuration first
cfg_path="models/pvanet/lite/train.yml"
cfg_from_file(cfg_path)
"""
1. PREPARING DATASET
"""
#Firstly, prepare the dataset for fine-tuning
#Different kinds of datasets are wrapped by the IMDB class, originally designed by <NAME>
#You need to put the coco data directory (a soft link works as well) under the PVA-NET directory
#COCO IMDB needs two parameters: data-split and year
coco_train = coco("train", "2014")
coco_val = coco("val", "2014")
#Fetch the classes of the coco dataset; this will be useful in the following section
classes = coco_val._classes
#Next, we import the VOC dataset via the pascal_voc wrapper
#Since VOC and COCO data have different naming among classes, a naming mapper is needed to unify the class names
mapper = {"tvmonitor":"tv", "sofa":"couch", "aeroplane":"airplane",
"motorbike":"motorcycle", "diningtable":"dining table", "pottedplant":"potted plant"}
#Finally, let's wrap datasets from Vatic.
#A vatic dataset directory should be located under the ~/data/ directory with a name of the form data-*
#For example: ~/data/data-YuDa, ~/data/data-A1HighwayDay
vatic_names = ["YuDa","A1HighwayDay", "B2HighwayNight", "airport", "airport2"]
mapper = {"van":"car", "trailer-head":"truck",\
"sedan/suv":"car", "scooter":"motorcycle", "bike":"bicycle"}
vatics = [VaticData(vatic_name, classes, CLS_mapper=mapper, train_split="all") for vatic_name in vatic_names]
#Combine all the IMDBs into one single IMDB for training
datasets = vatics + [coco_train, coco_val]
imdb_group = IMDBGroup(datasets)
imdb, roidb = combined_roidb(imdb_group)
total_len = float(len(imdb_group.gt_roidb()))
#Show the dataset percentage in the whole composition
for dataset in imdb_group._datasets:
img_nums = len(dataset.gt_roidb())
print(dataset.name, img_nums, "{0:.2f}%".format(img_nums/total_len * 100))
return roidb
def finetune(net_params, roidb, GPU_ID=1):
solver, train_pt, caffenet,bbox_pred_name, max_iters, output_dir, output_prefix = net_params
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
print 'Trained model will be saved to `{:s}`'.format(output_dir)
caffe.set_mode_gpu()
caffe.set_device(GPU_ID)
train_net(solver, roidb, output_dir, output_prefix,
pretrained_model=caffenet, max_iters=max_iters, bbox_pred_name=bbox_pred_name)
if __name__ == '__main__':
#Prepare roidb
roidb = prepare_data()
# Set each training parameter
solver = "models/pvanet/lite/coco_solver.prototxt"
train_pt = "models/pvanet/lite/coco_train.prototxt"
caffenet = "models/pvanet/lite/test.model"
#The bbox_pred_name is used to specify the new name of the bbox_pred layer in the modified prototxt. The bbox_pred layer is handled differently in the snapshotting procedure for the purpose of bbox normalization. In order to prevent snapshotting the un-tuned bbox_pred layer, we need to specify the new name.
bbox_pred_name = "bbox_pred-coco"
#The output directory and prefix for snapshots
output_dir = "models/rc/rc2"
output_prefix = "rc2"
#The maximum number of iterations is controlled here instead of in the solver
max_iters = 100 * 10000
net_params = (solver, train_pt, caffenet,bbox_pred_name, max_iters, output_dir, output_prefix)
GPU_ID = 2
#Start to finetune
finetune(net_params, roidb, GPU_ID)
```
|
{
"source": "jeffshurtliff/freshpy",
"score": 2
}
|
#### File: jeffshurtliff/freshpy/setup.py
```python
import setuptools
import codecs
import os.path
def read(rel_path):
"""This function reads the ``version.py`` script in order to retrieve the version.
.. versionadded:: 1.0.0
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
"""This function retrieves the current version of the package without needing to import the
:py:mod:`freshpy.utils.version` module in order to avoid dependency issues.
.. versionadded:: 1.0.0
"""
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delimiter = '"' if '"' in line else "'"
return line.split(delimiter)[1]
raise RuntimeError("Unable to find the version string")
with open('README.md', 'r') as fh:
long_description = fh.read()
version = get_version('src/freshpy/utils/version.py')
setuptools.setup(
name='freshpy',
version=version,
author='<NAME>',
author_email='<EMAIL>',
description='A Python toolset for utilizing the Freshservice API',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jeffshurtliff/freshpy",
project_urls={
'Change Log': 'https://freshpy.readthedocs.io/en/latest/changelog.html',
'Issue Tracker': 'https://github.com/jeffshurtliff/freshpy/issues',
},
package_dir={'': 'src'},
packages=setuptools.find_packages(where='src'),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Communications",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Content Management System",
"Topic :: Internet :: WWW/HTTP :: Site Management",
"Topic :: Office/Business",
"Topic :: Software Development :: Libraries :: Python Modules"
],
python_requires='>=3.6',
install_requires=[
"urllib3>=1.26.7",
"requests>=2.26.0",
"setuptools~=52.0.0"
],
extras_require={
'sphinx': [
'Sphinx>=3.4.0',
'sphinxcontrib-applehelp>=1.0.2',
'sphinxcontrib-devhelp>=1.0.2',
'sphinxcontrib-htmlhelp>=1.0.3',
'sphinxcontrib-jsmath>=1.0.1',
'sphinxcontrib-qthelp>=1.0.3',
'sphinxcontrib-serializinghtml>=1.1.4'
],
}
)
```
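Since `get_version()` just scans `version.py` for the `__version__` assignment, the behavior is easy to see in isolation; a self-contained sketch of the same delimiter-splitting logic (the version value is illustrative):
```python
# reproduce the parsing performed by get_version() on a sample line
sample_line = "__version__ = '9.9.9'"
delimiter = '"' if '"' in sample_line else "'"
print(sample_line.split(delimiter)[1])  # -> 9.9.9
```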
#### File: src/freshpy/api.py
```python
import json
import requests
from . import errors
from .utils import log_utils
# Initialize logging
logger = log_utils.initialize_logging(__name__)
def define_headers():
"""This function defines the headers to use in API calls.
.. versionadded:: 1.0.0
"""
headers = {'Content-Type': 'application/json'}
return headers
def define_auth(api_key):
"""This function defines the authentication dictionary to use in API calls.
.. versionadded:: 1.0.0
"""
credentials = (api_key, 'X')
return credentials
def get_request_with_retries(fresh_object, uri, headers=None, return_json=True, verify_ssl=True):
"""This function performs a GET request and will retry several times if a failure occurs.
.. versionchanged:: 1.1.0
Added the ability to disable SSL verification on API calls.
.. versionadded:: 1.0.0
:param fresh_object: The instantiated :py:class:`freshpy.core.FreshPy` object.
:param uri: The URI to query
:type uri: string
:param headers: The HTTP headers to utilize in the REST API call
:type headers: dict, None
:param return_json: Determines if JSON data should be returned
:type return_json: bool
:param verify_ssl: Determines if SSL verification should occur (``True`` by default)
:type verify_ssl: bool
:returns: The JSON data from the response or the raw :py:mod:`requests` response.
:raises: :py:exc:`freshpy.errors.exceptions.APIConnectionError`
"""
# Define headers if not supplied
headers = define_headers() if not headers else headers
# Construct the credentials dictionary
credentials = define_auth(fresh_object.api_key)
# Construct the query URL
query_url = fresh_object.base_url + uri
# Perform the API call
retries, response = 0, None
while retries <= 5:
try:
response = requests.get(query_url, headers=headers, auth=credentials, verify=verify_ssl)
break
except Exception as exc_msg:
_report_failed_attempt(exc_msg, 'get', retries)
retries += 1
if retries == 6:
_raise_exception_for_repeated_timeouts()
pass
if return_json:
response = response.json()
return response
def _report_failed_attempt(_exc_msg, _request_type, _retries):
"""This function reports a failed API call that will be retried.
.. versionadded:: 1.0.0
:param _exc_msg: The exception that was raised within a try/except clause
:param _request_type: The type of API request (e.g. ``post``, ``put`` or ``get``)
:type _request_type: str
:param _retries: The attempt number for the API request
:type _retries: int
:returns: None
"""
_exc_name = type(_exc_msg).__name__
if 'connect' not in _exc_name.lower():
raise Exception(f"{_exc_name}: {_exc_msg}")
_current_attempt = f"(Attempt {_retries} of 5)"
_error_msg = f"The {_request_type.upper()} request has failed with the following exception: " + \
f"{_exc_name}: {_exc_msg} {_current_attempt}"
errors.handlers.eprint(f"{_error_msg}\n{_exc_name}: {_exc_msg}\n")
return
def _raise_exception_for_repeated_timeouts():
"""This function raises an exception when all API attempts (including) retries resulted in a timeout.
.. versionadded:: 1.0.0
:returns: None
:raises: :py:exc:`freshpy.errors.exceptions.APIConnectionError`
"""
_failure_msg = "The script was unable to complete successfully after five consecutive API timeouts. " + \
"Please run the script again or contact Freshservice Support for further assistance."
raise errors.exceptions.APIConnectionError(_failure_msg)
```
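A sketch of calling the GET helper directly; the real `freshpy.core.FreshPy` object provides `base_url` and `api_key`, which are the only attributes this function touches (the domain, key, and endpoint below are placeholders):
```python
from freshpy import api

# stand-in for the instantiated freshpy.core.FreshPy object, used only for this sketch
class _FreshStub:
    base_url = 'https://example.freshservice.com'  # placeholder Freshservice domain
    api_key = 'YOUR-API-KEY'                       # placeholder API key

tickets = api.get_request_with_retries(_FreshStub(), '/api/v2/tickets', verify_ssl=True)
print(tickets)
```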
#### File: freshpy/errors/exceptions.py
```python
class FreshPyError(Exception):
"""This is the base class for FreshPy exceptions.
.. versionadded:: 1.0.0
"""
pass
############################
# Authentication Exceptions
############################
class MissingAuthDataError(FreshPyError):
"""This exception is used when authentication data is not supplied and therefore a connection cannot occur.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The authentication data was not provided and a connection cannot be established."
if not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
#####################
# General Exceptions
#####################
class CurrentlyUnsupportedError(FreshPyError):
"""This exception is used when a feature or functionality being used is currently unsupported.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "This feature is currently unsupported at this time."
if not (args or kwargs):
args = (default_msg,)
else:
custom_msg = f"The '{args[0]}' {default_msg.split('This ')[1]}"
args = (custom_msg,)
super().__init__(*args)
class DataMismatchError(FreshPyError):
"""This exception is used when there is a mismatch between two data sources.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "A data mismatch was found with the data sources."
if not (args or kwargs):
args = (default_msg,)
elif 'data' in kwargs:
multi_types = [list, tuple, set]
if type(kwargs['data']) == str:
custom_msg = f"{default_msg.split('data')[0]}'{kwargs['val']}'{default_msg.split('with the')[1]}"
custom_msg = custom_msg.replace('sources', 'source')
args = (custom_msg,)
elif type(kwargs['data']) in multi_types and len(kwargs['data']) == 2:
custom_section = f"'{kwargs['data'][0]}' and '{kwargs['data'][1]}'"
custom_msg = f"{default_msg.split('data sources')[0]}{custom_section}{default_msg.split('with the')[1]}"
args = (custom_msg,)
super().__init__(*args)
class InvalidFieldError(FreshPyError):
"""This exception is used when an invalid field is provided.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The field that was provided is invalid."
if not (args or kwargs):
args = (default_msg,)
elif 'val' in kwargs:
custom_msg = f"{default_msg.split('field ')[0]}'{kwargs['val']}'{default_msg.split('The')[1]}"
args = (custom_msg,)
super().__init__(*args)
class InvalidURLError(FreshPyError):
"""This exception is used when a provided URL is invalid.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The provided URL is invalid"
if not (args or kwargs):
args = (default_msg,)
elif 'url' in kwargs:
custom_msg = f"{default_msg.split('is')[0]}'{kwargs['url']}'{default_msg.split('URL')[1]}"
args = (custom_msg,)
super().__init__(*args)
class MissingRequiredDataError(FreshPyError):
"""This exception is used when a function or method is missing one or more required arguments.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "Missing one or more required parameters"
init_msg = "The object failed to initialize as it is missing one or more required arguments."
param_msg = "The required parameter 'PARAMETER_NAME' is not defined"
if not (args or kwargs):
args = (default_msg,)
elif 'init' in args or 'initialize' in args:
if 'object' in kwargs:
custom_msg = f"{init_msg.split('object')[0]}'{kwargs['object']}'{init_msg.split('The')[1]}"
args = (custom_msg,)
else:
args = (init_msg,)
elif 'param' in kwargs:
args = (param_msg.replace('PARAMETER_NAME', kwargs['param']),)
else:
args = (default_msg,)
super().__init__(*args)
#########################
# Generic API Exceptions
#########################
class APIConnectionError(FreshPyError):
"""This exception is used when the API query could not be completed due to connection aborts and/or timeouts.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The API query could not be completed due to connection aborts and/or timeouts."
if not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class APIRequestError(FreshPyError):
"""This exception is used for generic API request errors when there isn't a more specific exception.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The API request did not return a successful response."
if not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class DELETERequestError(FreshPyError):
"""This exception is used for generic DELETE request errors when there isn't a more specific exception.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The DELETE request did not return a successful response."
if not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class FeatureNotConfiguredError(FreshPyError):
"""This exception is used when an API request fails because a feature is not configured.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
exc_msg = "The feature is not configured."
if 'identifier' in kwargs or 'feature' in kwargs:
if 'identifier' in kwargs:
exc_msg += f" Identifier: {kwargs['identifier']}"
if 'feature' in kwargs:
exc_msg = exc_msg.replace("feature", f"{kwargs['feature']} feature")
args = (exc_msg,)
elif not (args or kwargs):
args = (exc_msg,)
super().__init__(*args)
class GETRequestError(FreshPyError):
"""This exception is used for generic GET request errors when there is not a more specific exception.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The GET request did not return a successful response."
custom_msg = "The GET request failed with the following message:"
if 'status_code' in kwargs or 'message' in kwargs:
if 'status_code' in kwargs:
status_code_msg = f"returned the {kwargs['status_code']} status code"
custom_msg = custom_msg.replace('failed', status_code_msg)
if 'message' in kwargs:
custom_msg = f"{custom_msg} {kwargs['message']}"
else:
custom_msg = custom_msg.split(' with the following')[0] + "."
args = (custom_msg,)
elif not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class InvalidPayloadValueError(FreshPyError):
"""This exception is used when an invalid value is provided for a payload field.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "An invalid payload value was provided."
custom_msg = "The invalid payload value 'X' was provided for the 'Y' field."
if not (args or kwargs):
args = (default_msg,)
elif 'value' in kwargs:
if 'field' in kwargs:
custom_msg = custom_msg.replace('X', kwargs['value']).replace('Y', kwargs['field'])
else:
custom_msg = f"{custom_msg.replace('X', kwargs['value']).split(' for the')[0]}."
args = (custom_msg,)
super().__init__(*args)
class InvalidRequestTypeError(FreshPyError):
"""This exception is used when an invalid API request type is provided.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The supplied request type for the API is not recognized. (Examples of valid " + \
"request types include 'POST' and 'PUT')"
if not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class LookupMismatchError(FreshPyError):
"""This exception is used when an a lookup value does not match the supplied lookup type.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The supplied lookup type for the API does not match the value that was provided."
if not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class NotFoundResponseError(FreshPyError):
"""This exception is used when an API query returns a 404 response and there isn't a more specific class.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The API query returned a 404 response."
if not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class POSTRequestError(FreshPyError):
"""This exception is used for generic POST request errors when there isn't a more specific exception.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The POST request did not return a successful response."
custom_msg = "The POST request failed with the following message:"
if 'status_code' in kwargs or 'message' in kwargs:
if 'status_code' in kwargs:
status_code_msg = f"returned the {kwargs['status_code']} status code"
custom_msg = custom_msg.replace('failed', status_code_msg)
if 'message' in kwargs:
custom_msg = f"{custom_msg} {kwargs['message']}"
else:
custom_msg = custom_msg.split(' with the following')[0] + "."
args = (custom_msg,)
elif not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
class PUTRequestError(FreshPyError):
"""This exception is used for generic PUT request errors when there isn't a more specific exception.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "The PUT request did not return a successful response."
custom_msg = "The PUT request failed with the following message:"
if 'status_code' in kwargs or 'message' in kwargs:
if 'status_code' in kwargs:
status_code_msg = f"returned the {kwargs['status_code']} status code"
custom_msg = custom_msg.replace('failed', status_code_msg)
if 'message' in kwargs:
custom_msg = f"{custom_msg} {kwargs['message']}"
else:
custom_msg = custom_msg.split(' with the following')[0] + "."
args = (custom_msg,)
elif not (args or kwargs):
args = (default_msg,)
super().__init__(*args)
####################
# Ticket Exceptions
####################
class InvalidFilterLogicError(FreshPyError):
"""This exception is used when an invalid filter logic operator is supplied.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "An invalid filter logic operator was provided."
custom_msg = "The filter logic operator 'X' is invalid."
if not (args or kwargs):
args = (default_msg,)
elif 'value' in kwargs:
custom_msg = custom_msg.replace('X', kwargs['value'])
args = (custom_msg,)
super().__init__(*args)
class InvalidPredefinedFilterError(FreshPyError):
"""This exception is used when the API query could not be completed due to connection aborts and/or timeouts.
.. versionadded:: 1.0.0
"""
def __init__(self, *args, **kwargs):
"""This method defines the default or custom message for the exception."""
default_msg = "An invalid predefined filter was provided."
custom_msg = "The provided filter 'X' is not a valid predefined filter."
if not (args or kwargs):
args = (default_msg,)
elif 'value' in kwargs:
custom_msg = custom_msg.replace('X', kwargs['value'])
args = (custom_msg,)
super().__init__(*args)
```
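Most of these classes accept keyword arguments that get folded into the default message templates defined above; for instance (the status code and message are illustrative):
```python
from freshpy.errors.exceptions import GETRequestError

# raises: "The GET request returned the 404 status code with the following
# message: The requested record does not exist."
raise GETRequestError(status_code=404, message='The requested record does not exist.')
```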
|
{
"source": "jeffshurtliff/khorosjx",
"score": 2
}
|
#### File: khorosjx/content/base.py
```python
import re
from .. import core, errors
from ..utils import core_utils
from ..utils.classes import Content
# Define global variables
base_url, api_credentials = '', None
# Define function to verify the connection in the core module
def verify_core_connection():
"""This function verifies that the core connection information (Base URL and API credentials) has been defined.
.. versionchanged:: 3.1.0
Refactored the function to be more pythonic and to avoid depending on a try/except block.
:returns: None
:raises: :py:exc:`khorosjx.errors.exceptions.KhorosJXError`,
:py:exc:`khorosjx.errors.exceptions.NoCredentialsError`
"""
if not base_url or not api_credentials:
retrieve_connection_info()
return
def retrieve_connection_info():
"""This function initializes and defines the global variables for the connection information.
.. versionchanged:: 3.1.0
Refactored the function to be more efficient.
:returns: None
:raises: :py:exc:`khorosjx.errors.exceptions.KhorosJXError`,
:py:exc:`khorosjx.errors.exceptions.NoCredentialsError`
"""
# Define the global variables at this module level
global base_url
global api_credentials
base_url, api_credentials = core.get_connection_info()
return
# Define function to get the content ID from a URL
def get_content_id(url, content_type="document", verify_ssl=True):
"""This function obtains the Content ID for a particular content asset. (Supports all but blog posts)
.. versionchanged:: 3.1.0
Made some minor syntax improvements.
.. versionchanged:: 2.6.0
Added the ``verify_ssl`` argument.
:param url: The URL to the content
:type url: str
:param content_type: The content type for the URL for which to obtain the Content ID (Default: ``document``)
:type content_type: str
:param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
:type verify_ssl: bool
:returns: The Content ID for the content URL
:raises: :py:exc:`ValueError`, :py:exc:`khorosjx.errors.exceptions.ContentNotFoundError`
"""
# Verify that the core connection has been established
verify_core_connection()
# Get the domain URL from the supplied content URL
if content_type in Content.content_url_delimiters:
platform_url = url.split(Content.content_url_delimiters.get(content_type))[0]
if not platform_url.startswith('http'):
platform_url = f"https://{platform_url}"
else:
error_msg = "Unable to identify the platform URL for the URL and defined content type."
raise ValueError(error_msg)
# Get the ID to be used in the GET request
if content_type == "document":
item_id = url.split('DOC-')[1]
elif content_type == "blog post":
raise ValueError("The get_content_id function does not currently support blog posts.")
else:
item_id = re.sub(r'^.*/', '', url)
# Construct the appropriate query URL
if content_type in Content.content_types:
content_type_id = Content.content_types.get(content_type)
query_url = f"{platform_url}/api/core/v3/contents?filter=entityDescriptor({content_type_id},{item_id})&count=1"
else:
error_msg = f"The content type {content_type} is unrecognized. Unable to perform the function."
raise ValueError(error_msg)
# Query the API to get the content ID
try:
response = core.get_request_with_retries(query_url, verify_ssl=verify_ssl)
content_data = response.json()
content_id = content_data['list'][0]['contentID']
except KeyError:
raise errors.exceptions.ContentNotFoundError()
return content_id
# Define an internal function to convert a lookup value to a proper lookup type
def __convert_lookup_value(_lookup_value, _lookup_type, _content_type="document"):
"""This function converts a lookup value to a proper lookup type.
:param _lookup_value: The lookup value to be converted
:type _lookup_value: str, int
:param _lookup_type: The current lookup type of the value to be converted
:type _lookup_type: str
:param _content_type: The type of content associated with the lookup value and lookup type (Default: ``document``)
:type _content_type: str
:returns: The properly formatted lookup value
:raises: :py:exc:`khorosjx.errors.exceptions.LookupMismatchError`,
:py:exc:`khorosjx.errors.exceptions.InvalidLookupTypeError`,
:py:exc:`khorosjx.errors.exceptions.CurrentlyUnsupportedError`
"""
# TODO: Rename this function to only have one underscore prefix
# Verify that the core connection has been established
verify_core_connection()
# Convert the lookup value as needed
if _content_type == "document":
# Get the Content ID if not supplied
if _lookup_type == "doc_id" or _lookup_type == "url":
if _lookup_type == "doc_id":
if 'http' in str(_lookup_value):
_error_msg = f"The 'doc_id' lookup_type was supplied (default) but the lookup value is a URL."
raise errors.exceptions.LookupMismatchError(_error_msg)
_lookup_value = f"{base_url.split('/api')[0]}/docs/DOC-{_lookup_value}"
_lookup_value = get_content_id(_lookup_value)
elif _lookup_type != "id" and _lookup_type != "content_id":
_exception_msg = "The supplied lookup type for the API is not recognized. " + \
"(Valid lookup types include 'id', 'content_id', 'doc_id' and 'url')"
raise errors.exceptions.InvalidLookupTypeError(_exception_msg)
else:
_exception_msg = f"The '{_content_type}' content type is not currently supported."
raise errors.exceptions.CurrentlyUnsupportedError(_exception_msg)
# TODO: Add functionality for other content types (e.g. discussion/question threads)
return _lookup_value
# Define internal function to trim the attachments data
def __trim_attachments_info(_attachment_info):
"""This function removes certain fields from attachments data captured via the API.
:param _attachment_info: List containing dictionaries of attachments retrieved via the API
:type _attachment_info: list
:returns: The trimmed list of dictionaries
"""
# TODO: Rename this function to only have one underscore prefix
for _idx in range(len(_attachment_info)):
_fields_to_ignore = ['resources', 'doUpload']
for _ignored_field in _fields_to_ignore:
if _ignored_field in _attachment_info[_idx].keys():
del _attachment_info[_idx][_ignored_field]
return _attachment_info
def get_paginated_content(endpoint, query_string="", start_index=0, dataset="", all_fields=False, return_fields=None,
ignore_exceptions=False):
"""This function returns paginated content information. (Up to 100 records at a time)
.. versionchanged:: 3.1.0
Changed the default ``return_fields`` value to ``None`` and adjusted the function accordingly.
:param endpoint: The full endpoint without preceding slash (e.g. ``securityGroups``, ``people/email/user_email``)
:type endpoint: str
:param query_string: Any query strings to apply (without preceding ``?``) excluding ``count`` and ``startIndex``
:type query_string: str
:param start_index: The startIndex API value
:type start_index: int, str
:param dataset: Defines the type of data returned in the API response (e.g. ``security_group``, ``people``, etc.)
:type dataset: str
:param all_fields: Determines if the ``fields=@all`` parameter should be passed in the query
:type all_fields: bool
:param return_fields: Specific fields to return if not all of the default fields are needed (Optional)
:type return_fields: list, None
:param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
:type ignore_exceptions: bool
:returns: A list of dictionaries containing information for each group in the paginated query
"""
# Initialize the empty list for the group information
content = []
# Identify if all fields should be captured
all_fields_options = {True: 'fields=@all&', False: ''}
all_fields = False if 'fields=@all' in query_string else all_fields
all_fields = all_fields_options.get(all_fields)
# Construct the API query
start_index_delimiter = {True: '?', False: '&'}
query_uri = f"{base_url}/{endpoint.replace('?', '')}?{query_string.replace('?', '')}"
empty_query = True if query_string == "" else False
query_uri = f"{query_uri}{start_index_delimiter.get(empty_query)}" + \
f"{all_fields}startIndex={start_index}&count=100"
# Perform the API query to retrieve the group information
response = core.get_request_with_retries(query_uri)
# Verify that the query was successful
successful_response = errors.handlers.check_api_response(response, ignore_exceptions=ignore_exceptions)
if successful_response:
# Get the response data in JSON format
paginated_data = response.json()
for content_data in paginated_data.get('list'):
if dataset == "" or dataset not in Content.datasets:
dataset = core_utils.identify_dataset(query_uri)
parsed_data = core.get_fields_from_api_response(content_data, dataset, return_fields)
content.append(parsed_data)
return content
```
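A sketch of resolving a Content ID from a document URL; it assumes the connection details have already been supplied via the core module, and the community URL and credentials below are placeholders:
```python
from khorosjx import core
from khorosjx.content import base as content_base

# placeholder environment and credentials
core.connect('https://community.example.com/api/core/v3', ('api_user', 'api_password'))

# documents are addressed as .../docs/DOC-<id>, which matches the URL delimiter lookup
content_id = content_base.get_content_id('https://community.example.com/docs/DOC-1234')
print(content_id)
```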
#### File: khorosjx/content/__init__.py
```python
import warnings
from . import base, docs, events, ideas, threads, videos
__all__ = ['base', 'docs', 'events', 'ideas', 'threads', 'videos']
# This function is deprecated and is only present until v3.0.0 to retain backward compatibility
def get_content_id(url, content_type="document"):
"""This function obtains the Content ID for a particular content asset. (Supports all but blog posts)
.. deprecated:: 2.0.0
The :py:func:`khorosjx.content.base.get_content_id` function should be used.
:param url: The URL to the content
:type url: str
:param content_type: The content type for the URL for which to obtain the Content ID (Default: ``document``)
:type content_type: str
:returns: The Content ID for the content URL
:raises: :py:exc:`ValueError`
"""
warnings.warn(
"The khorosjx.content.get_content_id function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.content.base.get_content_id instead.",
DeprecationWarning
)
content_id = base.get_content_id(url, content_type)
return content_id
# This function is deprecated and is only present until v3.0.0 to retain backward compatibility
def overwrite_doc_body(url, body_html, minor_edit=True, ignore_exceptions=False):
"""This function overwrites the body of a document with new HTML content.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.content.docs.overwrite_doc_body` function should be used.
    :param url: The URL of the document to be updated
:type url: str
    :param body_html: The new HTML body to replace the existing document body
    :type body_html: str
:param minor_edit: Determines whether the *Minor Edit* flag should be set (Default: ``True``)
:type minor_edit: bool
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
:type ignore_exceptions: bool
:returns: The response of the PUT request used to update the document
:raises: :py:exc:`khorosjx.errors.exceptions.ContentPublishError`
"""
warnings.warn(
"The khorosjx.content.overwrite_doc_body function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.content.docs.overwrite_doc_body instead.",
DeprecationWarning
)
put_response = docs.overwrite_doc_body(url, body_html, minor_edit, ignore_exceptions)
return put_response
# This function is deprecated and is only present until v3.0.0 to retain backward compatibility
def __convert_lookup_value(_lookup_value, _lookup_type, _content_type="document"):
"""This function converts a lookup value to a proper lookup type.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.content.base.__convert_lookup_value` function should be used.
:param _lookup_value: The lookup value to be converted
:type _lookup_value: str, int
:param _lookup_type: The current lookup type of the value to be converted
:type _lookup_type: str
:param _content_type: The type of content associated with the lookup value and lookup type (Default: ``document``)
:type _content_type: str
:returns: The properly formatted lookup value
:raises: :py:exc:`khorosjx.errors.exceptions.LookupMismatchError`,
:py:exc:`khorosjx.errors.exceptions.InvalidLookupTypeError`,
:py:exc:`khorosjx.errors.exceptions.CurrentlyUnsupportedError`
"""
warnings.warn(
"The khorosjx.content.__convert_lookup_value function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.content.base.__convert_lookup_value instead.",
DeprecationWarning
)
_lookup_value = base.__convert_lookup_value(_lookup_value, _lookup_type, _content_type)
return _lookup_value
# This function is deprecated and is only present until v3.0.0 to retain backward compatibility
def get_document_info(lookup_value, lookup_type='doc_id', return_fields=[], ignore_exceptions=False):
"""This function obtains the group information for a given document.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.content.docs.get_document_info` function should be used.
:param lookup_value: The value with which to look up the document
:type lookup_value: int, str
:param lookup_type: Identifies the type of lookup value that has been provided (Default: ``doc_id``)
:type lookup_type: str
:param return_fields: Specific fields to return if not all of the default fields are needed (Optional)
:type return_fields: list
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
:type ignore_exceptions: bool
    :returns: A dictionary with the document information
:raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`,
:py:exc:`khorosjx.errors.exceptions.InvalidDatasetError`,
:py:exc:`khorosjx.errors.exceptions.InvalidLookupTypeError`,
:py:exc:`khorosjx.errors.exceptions.LookupMismatchError`
"""
warnings.warn(
"The khorosjx.content.get_document_info function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.content.docs.get_document_info instead.",
DeprecationWarning
)
doc_info = docs.get_document_info(lookup_value, lookup_type, return_fields, ignore_exceptions)
return doc_info
# This function is deprecated and is only present until v3.0.0 to retain backward compatibility
def __trim_attachments_info(_attachment_info):
"""This function removes certain fields from attachments data captured via the API.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.content.base.__trim_attachments_info` function should be used.
:param _attachment_info: List containing dictionaries of attachments retrieved via the API
:type _attachment_info: list
:returns: The trimmed list of dictionaries
"""
warnings.warn(
"The khorosjx.content.__trim_attachments_info function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.content.base.__trim_attachments_info instead.",
DeprecationWarning
)
_attachment_info = base.__trim_attachments_info(_attachment_info)
return _attachment_info
# This function is deprecated and is only present until v3.0.0 to retain backward compatibility
def get_document_attachments(lookup_value, lookup_type='doc_id', return_dataframe=False):
"""This function retrieves information on any attachments associated with a document.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.content.docs.get_document_attachments` function should be used.
:param lookup_value: The value with which to look up the document
:type lookup_value: str, int
:param lookup_type: Identifies the type of lookup value that has been provided (Default: ``doc_id``)
:type lookup_type: str
:param return_dataframe: Determines whether or not a pandas dataframe should be returned
:type return_dataframe: bool
:returns: A list, dictionary or pandas dataframe depending on the number of attachments and/or function arguments
:raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`,
:py:exc:`khorosjx.errors.exceptions.InvalidDatasetError`,
:py:exc:`khorosjx.errors.exceptions.InvalidLookupTypeError`,
:py:exc:`khorosjx.errors.exceptions.LookupMismatchError`
"""
warnings.warn(
"The khorosjx.content.get_document_attachments function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.content.docs.get_document_attachments instead.",
DeprecationWarning
)
attachment_info = docs.get_document_attachments(lookup_value, lookup_type, return_dataframe)
return attachment_info
```
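These wrappers only emit a `DeprecationWarning` and then delegate to the newer sub-modules, so
migrating is largely a matter of changing the import path. A minimal before/after sketch (the
document URL is a hypothetical placeholder, and the khorosjx connection info is assumed to have
been defined beforehand):

```python
from khorosjx import content
from khorosjx.content import base

doc_url = 'https://community.example.com/docs/DOC-1234'  # hypothetical document URL

# Deprecated path -- still functional until v3.0.0 but emits a DeprecationWarning
content_id = content.get_content_id(doc_url)

# Preferred path -- calls the same underlying logic without the warning
content_id = base.get_content_id(doc_url)
```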
#### File: khorosjx/places/blogs.py
```python
from .. import core
from . import base as places_core
# Define global variables
base_url, api_credentials = '', None
def verify_core_connection():
"""This function verifies that the core connection information (Base URL and API credentials) has been defined.
.. versionchanged:: 3.1.0
Refactored the function to be more pythonic and to avoid depending on a try/except block.
:returns: None
:raises: :py:exc:`khorosjx.errors.exceptions.KhorosJXError`,
:py:exc:`khorosjx.errors.exceptions.NoCredentialsError`
"""
if not base_url or not api_credentials:
retrieve_connection_info()
return
def retrieve_connection_info():
"""This function initializes and defines the global variables for the connection information.
.. versionadded:: 3.1.0
:returns: None
:raises: :py:exc:`khorosjx.errors.exceptions.KhorosJXError`,
:py:exc:`khorosjx.errors.exceptions.NoCredentialsError`
"""
# Define the global variables at this module level
global base_url
global api_credentials
base_url, api_credentials = core.get_connection_info()
return
# Define function to get space info
def get_blog_info(place_id, return_fields=None, ignore_exceptions=False):
"""This function obtains the blog information for a given Place ID. (aka Browse ID)
.. versionchanged:: 3.1.0
Changed the default ``return_fields`` value to ``None`` and adjusted the function accordingly.
:param place_id: The Place ID (aka Browse ID) of the blog whose information will be requested
:type place_id: int, str
:param return_fields: Specific fields to return if not all of the default fields are needed (Optional)
:type return_fields: list, None
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
:type ignore_exceptions: bool
:returns: A dictionary with the blog information
    :raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`,
             :py:exc:`khorosjx.errors.exceptions.InvalidDatasetError`
"""
# Verify that the core connection has been established
verify_core_connection()
# Leverage the core module to retrieve the data
blog_info = places_core.get_place_info(place_id, return_fields, ignore_exceptions)
return blog_info
```
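`get_blog_info` is a thin wrapper around `places.base.get_place_info`, so a call only needs the
Place ID and, optionally, the fields of interest. A short usage sketch (the Place ID and field
names are hypothetical, and the connection info is assumed to be defined already):

```python
from khorosjx.places import blogs

# Retrieve selected fields for a blog identified by its Place ID (aka Browse ID)
blog_info = blogs.get_blog_info(12345, return_fields=['name', 'description'])
print(blog_info.get('name'))
```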
#### File: khorosjx/khorosjx/spaces.py
```python
import warnings
from .places import spaces
from .places import base as places_core
# Define function to get basic group information for a particular Group ID
def get_space_info(place_id, return_fields=[], ignore_exceptions=False):
"""This function obtains the space information for a given Space ID.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.get_space_info` function should be used.
:param place_id: The Place ID (aka Browse ID) of the space whose information will be requested
:type place_id: int, str
:param return_fields: Specific fields to return if not all of the default fields are needed (Optional)
:type return_fields: list
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
:type ignore_exceptions: bool
:returns: A dictionary with the space information
:raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`,
:py:exc:`khorosjx.errors.exceptions.InvalidDatasetError`
"""
warnings.warn(
"The khorosjx.spaces.get_space_info function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.places.spaces.get_space_info instead.",
DeprecationWarning
)
space_info = spaces.get_space_info(place_id, return_fields, ignore_exceptions)
return space_info
# Define function to get the Place ID for a space
def get_place_id(space_id, return_type='str'):
"""This function retrieves the Place ID (aka Browse ID) for a space given its ID.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.get_place_id` function should be used.
:param space_id: The Space ID for the space to query
:type space_id: int, str
:param return_type: Determines whether to return the value as a ``str`` or an ``int`` (Default: ``str``)
:type return_type: str
:returns: The Place ID (aka Browse ID) for the space
    :raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`
"""
warnings.warn(
"The khorosjx.spaces.get_place_id function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.places.base.get_place_id instead.",
DeprecationWarning
)
place_id = places_core.get_place_id(space_id, return_type)
return place_id
# Define function to get the Browse ID for a space
def get_browse_id(space_id, return_type='str'):
"""This function retrieves the Browse ID (aka Place ID) for a space given its ID.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.get_browse_id` function should be used.
:param space_id: The Space ID for the space to query
:type space_id: int, str
:param return_type: Determines whether to return the value as a ``str`` or an ``int`` (Default: ``str``)
:type return_type: str
:returns: The Browse ID (aka Place ID) for the space
:raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`
"""
warnings.warn(
"The khorosjx.spaces.get_browse_id function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.places.base.get_browse_id instead.",
DeprecationWarning
)
browse_id = places_core.get_place_id(space_id, return_type)
return browse_id
def __verify_browse_id(_id_value, _id_type):
"""This function checks for a Browse ID and converts another value to get it if necessary.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.__verify_browse_id` function should be used.
"""
warnings.warn(
"The khorosjx.spaces.__verify_browse_id function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.places.base.__verify_browse_id instead.",
DeprecationWarning
)
_id_value = places_core.__verify_browse_id(_id_value, _id_type)
return _id_value
# Define function to get a space list from a CSV or Excel file
def get_spaces_list_from_file(full_path, file_type='csv', has_headers=True,
id_column='', id_type='browse_id', excel_sheet_name='', filter_info={}):
"""This function retrieves a list of space identifiers from a file.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.base.get_places_list_from_file` function should be used.
:param full_path: The full path to the file to import
:type full_path: str
    :param file_type: Defines if the file to be imported is a ``csv`` (Default), ``xlsx``, ``xls`` or ``txt`` file.
    :type file_type: str
:param has_headers: Defines if the import file uses column headers (``True`` by default)
:type has_headers: bool
:param id_column: Defines the column name (if applicable) which contains the space identifier (Null by default)
:type id_column: str
:param id_type: Defines if the ID type is a ``browse_id`` (Default) or ``place_id`` (aka ``container_id``)
:type id_type: str
:param excel_sheet_name: The sheet name to retrieve if an Excel file is supplied (First sheet imported by default)
:type excel_sheet_name: str
:param filter_info: Dictionary used to apply any filter to the imported data if necessary (Null by default)
:type filter_info: dict
:returns: A list of space identifiers
:raises: :py:exc:`khorosjx.errors.exceptions.InvalidFileTypeError`
"""
warnings.warn(
"The khorosjx.spaces.get_spaces_list_from_file function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.places.base.get_places_list_from_file instead.",
DeprecationWarning
)
spaces_list = places_core.get_places_list_from_file(full_path, file_type, has_headers, id_column,
id_type, excel_sheet_name, filter_info)
return spaces_list
# Define function to get the permitted content types for a space
def get_permitted_content_types(id_value, id_type='browse_id', return_type='list'):
"""This function returns the permitted content types for a given space.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.get_permitted_content_types` function should be used.
:param id_value: The space identifier as a Browse ID (default), Place ID or Space ID
:type id_value: int, str
:param id_type: Determines if the ``id_value`` is a ``browse_id`` (Default), ``place_id`` or ``space_id``
:type id_type: str
:param return_type: Determines if the result should be returned in ``list`` (Default), ``tuple`` or ``str`` format
:type return_type: str
:returns: The permitted content types in list, tuple or string format
:raises: :py:exc:`khorosjx.errors.exceptions.SpaceNotFountError`,
:py:exc:`khorosjx.errors.exceptions.GETRequestError`
"""
warnings.warn(
"The khorosjx.spaces.get_permitted_content_types function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.places.spaces.get_permitted_content_types instead.",
DeprecationWarning
)
content_types = spaces.get_permitted_content_types(id_value, id_type, return_type)
return content_types
# Define function to get space permissions for a space
def get_space_permissions(id_value, id_type='browse_id', return_type='list'):
"""This function returns all of the defined permissions for a specific space.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.get_space_permissions` function should be used.
:param id_value: The space identifier as a Browse ID (default), Place ID or Space ID
:type id_value: int, str
:param id_type: Determines if the ``id_value`` is a ``browse_id`` (Default), ``place_id`` or ``space_id``
:type id_type: str
:param return_type: Determines if the result should be returned as a ``list`` (Default) or pandas ``dataframe``
:type return_type: str
:returns: The list or dataframe with the space permissions
:raises: :py:exc:`khorosjx.errors.exceptions.SpaceNotFountError`,
:py:exc:`khorosjx.errors.exceptions.GETRequestError`
"""
warnings.warn(
"The khorosjx.spaces.get_space_permissions function is deprecated and will be removed in v3.0.0. Use " +
"khorosjx.places.spaces.get_space_permissions instead.",
DeprecationWarning
)
all_permissions = spaces.get_space_permissions(id_value, id_type, return_type)
return all_permissions
# Define function to get the unique fields for the permissions data
def __get_unique_permission_fields(_permissions_dict_list):
"""This function gets the unique fields from a space permissions list.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.__get_unique_permission_fields` function should be used.
:param _permissions_dict_list: A list of dictionaries containing space permissions
:type _permissions_dict_list: list
:returns: List of unique field names
"""
warnings.warn(
"The khorosjx.spaces.__get_unique_permission_fields function is deprecated and will be removed " +
"in v3.0.0. Use khorosjx.places.spaces.__get_unique_permission_fields instead.",
DeprecationWarning
)
_unique_fields = spaces.__get_unique_permission_fields(_permissions_dict_list)
return _unique_fields
# Define function to generate a dataframe with the space permissions
def __generate_permissions_dataframe(_permissions_dict_list):
"""This function converts a list of dictionaries containing space permissions into a pandas dataframe.
.. deprecated:: 2.0.0
The :py:func:`khorosjx.places.spaces.__generate_permissions_dataframe` function should be used.
:param _permissions_dict_list: A list of dictionaries containing space permissions
:type _permissions_dict_list: list
:returns: A pandas dataframe with the permissions data
"""
warnings.warn(
"The khorosjx.spaces.__generate_permissions_dataframe function is deprecated and will be removed " +
"in v3.0.0. Use khorosjx.places.spaces.__generate_permissions_dataframe instead.",
DeprecationWarning
)
_permissions_data = spaces.__generate_permissions_dataframe(_permissions_dict_list)
return _permissions_data
```
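Every function in this deprecated module forwards to `khorosjx.places.spaces` or
`khorosjx.places.base` with the same signature, so the two call styles below are
interchangeable. A migration sketch (the Browse ID is a hypothetical placeholder and an
established connection is assumed):

```python
from khorosjx import spaces                           # deprecated; removed in v3.0.0
from khorosjx.places import spaces as places_spaces   # preferred replacement

# Deprecated call -- emits a DeprecationWarning before delegating
permissions = spaces.get_space_permissions(98765, id_type='browse_id')

# Preferred call -- same signature and behavior, no warning
permissions = places_spaces.get_space_permissions(98765, id_type='browse_id')
```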
#### File: khorosjx/utils/version.py
```python
import warnings
import requests
__version__ = "3.2.0"
def get_full_version():
"""This function returns the current full version of the khorosjx package.
:returns: The current full version (i.e. X.Y.Z) in string format
"""
return __version__
def get_major_minor_version():
"""This function returns the current major.minor (i.e. X.Y) version of the khorosjx package.
:returns: The current major.minor (i.e. X.Y) version in string format
"""
return ".".join(__version__.split(".")[:2])
def get_latest_stable():
"""This function returns the latest stable version of the khorosjx package.
:returns: The latest stable version in string format
"""
pypi_data = requests.get('https://pypi.org/pypi/khorosjx/json').json()
return pypi_data['info']['version']
def latest_version():
"""This function defines if the current version matches the latest stable version on PyPI.
:returns: Boolean value indicating if the versions match
"""
latest_stable = get_latest_stable()
    return __version__ == latest_stable
def warn_when_not_latest():
"""This function displays a :py:exc:`RuntimeWarning` if the running version doesn't match the latest stable version.
:returns: None
"""
try:
if not latest_version():
warn_msg = "The latest stable version of khorosjx is not running. " + \
"Consider running 'pip install khorosjx --upgrade' when feasible."
warnings.warn(warn_msg, RuntimeWarning)
except Exception:
pass
return
```
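The version helpers can be combined to report on the installed package and nudge users to
upgrade; note that `get_latest_stable` (and therefore `latest_version` and
`warn_when_not_latest`) performs a live lookup against PyPI. A brief sketch:

```python
from khorosjx.utils import version

print(version.get_full_version())         # e.g. '3.2.0'
print(version.get_major_minor_version())  # e.g. '3.2'

# Emits a RuntimeWarning when the installed version lags the latest stable release on PyPI
version.warn_when_not_latest()
```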
|
{
"source": "jeffshurtliff/khoros",
"score": 2
}
|
#### File: khoros/objects/messages.py
```python
import json
import warnings
from . import attachments, users
from . import tags as tags_module
from .. import api, liql, errors
from ..structures import nodes
from ..utils import log_utils
# Initialize the logger for this module
logger = log_utils.initialize_logging(__name__)
REQUIRED_FIELDS = ['board', 'subject']
CONTEXT_KEYS = ['id', 'url']
SEO_KEYS = ['title', 'description', 'canonical_url']
MESSAGE_SEO_URLS = {
'Blog Article': 'ba-p',
'Blog Comment': 'bc-p',
'Contest Item': 'cns-p',
'Idea': 'idi-p',
'Message': 'm-p',
'Question': 'qaq-p',
'TKB Article': 'ta-p',
'Topic': 'td-p'
}
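# MESSAGE_SEO_URLS maps each conversation style to the URL segment that precedes the numeric
# message ID in Khoros Community permalinks; get_id_from_url() below searches a URL for these
# segments in order to extract the ID.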
def create(khoros_object, subject=None, body=None, node=None, node_id=None, node_url=None, canonical_url=None,
context_id=None, context_url=None, cover_image=None, images=None, is_answer=None, is_draft=None,
labels=None, product_category=None, products=None, read_only=None, seo_title=None, seo_description=None,
tags=None, ignore_non_string_tags=False, teaser=None, topic=None, videos=None, attachment_file_paths=None,
full_payload=None, full_response=False, return_id=False, return_url=False, return_api_url=False,
return_http_code=False, return_status=None, return_error_messages=None, split_errors=False,
proxy_user_object=None):
"""This function creates a new message within a given node.
.. versionchanged:: 4.4.0
Introduced the ``proxy_user_object`` parameter to allow messages to be created on behalf of other users.
.. versionchanged:: 4.3.0
It is now possible to pass the pre-constructed full JSON payload into the function via the ``full_payload``
parameter as an alternative to defining each field individually.
.. versionchanged:: 2.8.0
The ``ignore_non_string_tags``, ``return_status``, ``return_error_messages`` and ``split_errors``
arguments were introduced.
.. versionadded:: 2.3.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param subject: The title or subject of the message
:type subject: str, None
:param body: The body of the message in HTML format
:type body: str, None
:param node: A dictionary containing the ``id`` key and its associated value indicating the destination
:type node: dict, None
:param node_id: The ID of the node in which the message will be published
:type node_id: str, None
:param node_url: The URL of the node in which the message will be published
.. note:: This argument is necessary in the absence of the ``node`` and ``node_id`` arguments.
:type node_url: str, None
:param canonical_url: The search engine-friendly URL to the message
:type canonical_url: str, None
:param context_id: Metadata on a message to identify the message with an external identifier of your choosing
:type context_id: str, None
:param context_url: Metadata on a message representing a URL to associate with the message (external identifier)
:type context_url: str, None
:param cover_image: The cover image set for the message
:type cover_image: dict, None
:param images: The query to retrieve images uploaded to the message
:type images: dict, None
:param is_answer: Designates the message as an answer on a Q&A board
:type is_answer: bool, None
:param is_draft: Indicates whether or not the message is still a draft (i.e. unpublished)
:type is_draft: bool, None
:param labels: The query to retrieve labels applied to the message
:type labels: dict, None
:param product_category: The product category (i.e. container for ``products``) associated with the message
:type product_category: dict, None
:param products: The product in a product catalog associated with the message
:type products: dict, None
:param read_only: Indicates whether or not the message should be read-only or have replies/comments blocked
:type read_only: bool, None
:param seo_title: The title of the message used for SEO purposes
:type seo_title: str, None
:param seo_description: A description of the message used for SEO purposes
:type seo_description: str, None
:param tags: The query to retrieve tags applied to the message
:type tags: dict, None
:param ignore_non_string_tags: Determines if non-strings (excluding iterables) should be ignored rather than
converted to strings (``False`` by default)
:type ignore_non_string_tags: bool
:param teaser: The message teaser (used with blog articles)
:type teaser: str, None
:param topic: The root message of the conversation in which the message appears
:type topic: dict, None
:param videos: The query to retrieve videos uploaded to the message
:type videos: dict, None
    :param attachment_file_paths: The full path(s) to one or more attachments (e.g. ``path/to/file1.pdf``)
:type attachment_file_paths: str, tuple, list, set, None
:param full_payload: Pre-constructed full JSON payload as a dictionary (*preferred*) or a JSON string with the
following syntax:
.. code-block:: json
{
"data": {
"type": "message",
}
}
.. note:: The ``type`` field shown above is essential for the payload to be valid.
:type full_payload: dict, str, None
:param full_response: Defines if the full response should be returned instead of the outcome (``False`` by default)
.. caution:: This argument overwrites the ``return_id``, ``return_url``, ``return_api_url``
and ``return_http_code`` arguments.
:type full_response: bool
:param return_id: Indicates that the **Message ID** should be returned (``False`` by default)
:type return_id: bool
:param return_url: Indicates that the **Message URL** should be returned (``False`` by default)
:type return_url: bool
:param return_api_url: Indicates that the **API URL** of the message should be returned (``False`` by default)
:type return_api_url: bool
:param return_http_code: Indicates that the **HTTP status code** of the response should be returned
(``False`` by default)
:type return_http_code: bool
:param return_status: Determines if the **Status** of the API response should be returned by the function
:type return_status: bool, None
:param return_error_messages: Determines if the **Developer Response Message** (if any) associated with the
API response should be returned by the function
:type return_error_messages: bool, None
:param split_errors: Defines whether or not error messages should be merged when applicable
:type split_errors: bool
:param proxy_user_object: Instantiated :py:class:`khoros.objects.users.ImpersonatedUser` object to create the
message on behalf of a secondary user.
:type proxy_user_object: class[khoros.objects.users.ImpersonatedUser], None
:returns: Boolean value indicating a successful outcome (default) or the full API response
:raises: :py:exc:`TypeError`, :py:exc:`ValueError`, :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`,
:py:exc:`khoros.errors.exceptions.DataMismatchError`
"""
api_url = f"{khoros_object.core['v2_base']}/messages"
if full_payload:
payload = validate_message_payload(full_payload)
else:
payload = construct_payload(subject, body, node, node_id, node_url, canonical_url, context_id, context_url,
is_answer, is_draft, read_only, seo_title, seo_description, teaser, tags,
cover_image, images, labels, product_category, products, topic, videos,
ignore_non_string_tags=ignore_non_string_tags, khoros_object=khoros_object)
payload = validate_message_payload(payload)
    multipart = bool(attachment_file_paths)
if multipart:
payload = attachments.construct_multipart_payload(payload, attachment_file_paths)
response = api.post_request_with_retries(api_url, payload, khoros_object=khoros_object, multipart=multipart,
proxy_user_object=proxy_user_object)
return api.deliver_v2_results(response, full_response, return_id, return_url, return_api_url, return_http_code,
return_status, return_error_messages, split_errors, khoros_object)
def validate_message_payload(payload):
"""This function validates the payload for a message to ensure that it can be successfully utilized.
.. versionadded:: 4.3.0
:param payload: The message payload to be validated as a dictionary (*preferred*) or a JSON string.
:type payload: dict, str
:returns: The payload as a dictionary
:raises: :py:exc:`khoros.errors.exceptions.InvalidMessagePayloadError`
"""
if not payload and not isinstance(payload, dict) and not isinstance(payload, str):
raise errors.exceptions.InvalidMessagePayloadError("The message payload is null.")
if isinstance(payload, str):
logger.warning("The message payload is defined as a JSON string and will be converted to a dictionary.")
payload = json.loads(payload)
if not isinstance(payload, dict):
raise errors.exceptions.InvalidMessagePayloadError("The message payload must be a dictionary or "
"JSON string.")
if 'data' not in payload:
raise errors.exceptions.InvalidMessagePayloadError("The message payload must include the 'data' key.")
if 'type' not in payload.get('data'):
raise errors.exceptions.InvalidMessagePayloadError("The message payload must include the `type` key (with "
"'message' as the value) within the 'data' parent key.")
if payload.get('data').get('type') != 'message':
raise errors.exceptions.InvalidMessagePayloadError("The value for the 'type' key in the message payload "
"must be defined as 'message' but was defined as "
f"'{payload.get('data').get('type')}' instead.")
    if ('subject' not in payload.get('data') or 'board' not in payload.get('data') or
            'id' not in payload.get('data').get('board')):
raise errors.exceptions.InvalidMessagePayloadError("A node and subject must be defined.")
return payload
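# For reference, a minimal payload that passes the checks above resembles the following
# (the board ID and subject are hypothetical placeholders):
#     {"data": {"type": "message", "board": {"id": "example-board"}, "subject": "Example subject"}}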
def construct_payload(subject=None, body=None, node=None, node_id=None, node_url=None, canonical_url=None,
context_id=None, context_url=None, is_answer=None, is_draft=None, read_only=None, seo_title=None,
seo_description=None, teaser=None, tags=None, cover_image=None, images=None, labels=None,
product_category=None, products=None, topic=None, videos=None, parent=None, status=None,
moderation_status=None, attachments_to_add=None, attachments_to_remove=None, overwrite_tags=False,
ignore_non_string_tags=False, msg_id=None, khoros_object=None, action='create'):
"""This function constructs and properly formats the JSON payload for a messages API request.
.. todo::
Add support for the following parameters which are currently present but unsupported: ``cover_image``,
``images``, ``labels``, ``product_category``, ``products``, ``topic``, ``videos``, ``parent``, ``status``,
       ``attachments_to_add`` and ``attachments_to_remove``
.. versionchanged:: 2.8.0
Added the ``parent``, ``status``, ``moderation_status``, ``attachments_to_add``, ``attachments_to_remove``,
``overwrite_tags``, ``ignore_non_string_tags``, ``msg_id``, ``khoros_object`` and ``action`` arguments, and
added the ``raises`` section to the docstring.
.. versionadded:: 2.3.0
:param subject: The title or subject of the message
:type subject: str, None
:param body: The body of the message in HTML format
:type body: str, None
:param node: A dictionary containing the ``id`` key and its associated value indicating the destination
:type node: dict, None
:param node_id: The ID of the node in which the message will be published
:type node_id: str, None
:param node_url: The URL of the node in which the message will be published
.. note:: This argument is necessary in the absence of the ``node`` and ``node_id`` arguments.
:type node_url: str, None
:param canonical_url: The search engine-friendly URL to the message
:type canonical_url: str, None
:param context_id: Metadata on a message to identify the message with an external identifier of your choosing
:type context_id: str, None
:param context_url: Metadata on a message representing a URL to associate with the message (external identifier)
:type context_url: str, None
:param is_answer: Designates the message as an answer on a Q&A board
:type is_answer: bool, None
:param is_draft: Indicates whether or not the message is still a draft (i.e. unpublished)
:type is_draft: bool, None
:param read_only: Indicates whether or not the message should be read-only or have replies/comments blocked
:type read_only: bool, None
:param seo_title: The title of the message used for SEO purposes
:type seo_title: str, None
:param seo_description: A description of the message used for SEO purposes
:type seo_description: str, None
:param teaser: The message teaser (used with blog articles)
:type teaser: str, None
:param tags: The query to retrieve tags applied to the message
:type tags: dict, None
:param cover_image: The cover image set for the message
:type cover_image: dict, None
:param images: The query to retrieve images uploaded to the message
:type images: dict, None
:param labels: The query to retrieve labels applied to the message
:type labels: dict, None
:param product_category: The product category (i.e. container for ``products``) associated with the message
:type product_category: dict, None
:param products: The product in a product catalog associated with the message
:type products: dict, None
:param topic: The root message of the conversation in which the message appears
:type topic: dict, None
:param videos: The query to retrieve videos uploaded to the message
:type videos: dict, None
:param parent: The parent of the message
:type parent: str, None
:param status: The message status for messages where conversation.style is ``idea`` or ``contest``
.. caution:: This property is not returned if the message has the default ``Unspecified`` status
assigned. It will only be returned for ideas with a status of ``Completed`` or with a
custom status created in Community Admin.
:type status: dict, None
:param moderation_status: The moderation status of the message
.. note:: Acceptable values are ``unmoderated``, ``approved``, ``rejected``,
``marked_undecided``, ``marked_approved`` and ``marked_rejected``.
:type moderation_status: str, None
:param attachments_to_add: The full path(s) to one or more attachments (e.g. ``path/to/file1.pdf``) to be
added to the message
:type attachments_to_add: str, tuple, list, set, None
:param attachments_to_remove: One or more attachments to remove from the message
.. note:: Each attachment should specify the attachment id of the attachment to
remove, which begins with ``m#_``. (e.g. ``m283_file1.pdf``)
:type attachments_to_remove: str, tuple, list, set, None
:param overwrite_tags: Determines if tags should overwrite any existing tags (where applicable) or if the tags
should be appended to the existing tags (default)
:type overwrite_tags: bool
:param ignore_non_string_tags: Determines if non-strings (excluding iterables) should be ignored rather than
converted to strings (``False`` by default)
:type ignore_non_string_tags: bool
:param msg_id: Message ID of an existing message so that its existing tags can be retrieved (optional)
:type msg_id: str, int, None
:param khoros_object: The core :py:class:`khoros.Khoros` object
.. note:: The core object is only necessary when providing a Message ID as it will be
needed to retrieve the existing tags from the message.
:type khoros_object: class[khoros.Khoros], None
:param action: Defines if the payload will be used to ``create`` (default) or ``update`` a message
:type action: str
:returns: The properly formatted JSON payload
:raises: :py:exc:`TypeError`, :py:exc:`ValueError`, :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`,
:py:exc:`khoros.errors.exceptions.DataMismatchError`
"""
# Define the default payload structure
payload = {
"data": {
"type": "message"
}
}
# Ensure the required fields are defined if creating a message
if action == 'create':
_verify_required_fields(node, node_id, node_url, subject)
# Define the destination
if action == 'create' or any((node, node_id, node_url)):
if not node:
if node_id:
node = {"id": f"{node_id}"}
else:
node = {"id": f"{nodes.get_node_id(url=node_url)}"}
payload['data']['board'] = node
# Add supplied data where appropriate if string or Boolean
supplied_data = {
'body': (body, str),
'subject': (subject, str),
'canonical_url': (canonical_url, str),
'context_id': (context_id, str),
'context_url': (context_url, str),
'is_answer': (is_answer, bool),
'is_draft': (is_draft, bool),
'read_only': (read_only, bool),
'seo_title': (seo_title, str),
'seo_description': (seo_description, str),
'teaser': (teaser, str)
}
for field_name, field_value in supplied_data.items():
if field_value[0]:
if field_value[1] == str:
payload['data'][field_name] = f"{field_value[0]}"
elif field_value[1] == bool:
bool_value = bool(field_value[0]) if isinstance(field_value[0], str) else field_value[0]
payload['data'][field_name] = bool_value
# Add moderation status to payload when applicable
payload = _add_moderation_status_to_payload(payload, moderation_status)
# Add tags to payload when applicable
if tags:
payload = _add_tags_to_payload(payload, tags, _khoros_object=khoros_object, _msg_id=msg_id,
_overwrite_tags=overwrite_tags, _ignore_non_strings=ignore_non_string_tags)
# TODO: Add functionality for remaining non-string and non-Boolean arguments
return payload
def update(khoros_object, msg_id=None, msg_url=None, subject=None, body=None, node=None, node_id=None, node_url=None,
canonical_url=None, context_id=None, context_url=None, cover_image=None, is_draft=None, labels=None,
moderation_status=None, parent=None, product_category=None, products=None, read_only=None, topic=None,
status=None, seo_title=None, seo_description=None, tags=None, overwrite_tags=False,
ignore_non_string_tags=False, teaser=None, attachments_to_add=None, attachments_to_remove=None,
full_response=None, return_id=None, return_url=None, return_api_url=None, return_http_code=None,
return_status=None, return_error_messages=None, split_errors=False, proxy_user_object=None):
"""This function updates one or more elements of an existing message.
.. versionchanged:: 4.4.0
Introduced the ``proxy_user_object`` parameter to allow messages to be updated on behalf of other users.
.. versionadded:: 2.8.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param msg_id: The ID of the existing message
:type msg_id: str, int, None
:param msg_url: The URL of the existing message
:type msg_url: str, None
:param subject: The title or subject of the message
:type subject: str, None
:param body: The body of the message in HTML format
:type body: str, None
:param node: A dictionary containing the ``id`` key and its associated value indicating the destination
:type node: dict, None
:param node_id: The ID of the node in which the message will be published
:type node_id: str, None
:param node_url: The URL of the node in which the message will be published
.. note:: This argument is necessary in the absence of the ``node`` and ``node_id`` arguments.
:type node_url: str, None
:param canonical_url: The search engine-friendly URL to the message
:type canonical_url: str, None
:param context_id: Metadata on a message to identify the message with an external identifier of your choosing
:type context_id: str, None
:param context_url: Metadata on a message representing a URL to associate with the message (external identifier)
:type context_url: str, None
:param cover_image: The cover image set for the message
:type cover_image: dict, None
:param is_draft: Indicates whether or not the message is still a draft (i.e. unpublished)
:type is_draft: bool, None
:param labels: The query to retrieve labels applied to the message
:type labels: dict, None
:param moderation_status: The moderation status of the message
.. note:: Acceptable values are ``unmoderated``, ``approved``, ``rejected``,
``marked_undecided``, ``marked_approved`` and ``marked_rejected``.
:type moderation_status: str, None
:param parent: The parent of the message
:type parent: str, None
:param product_category: The product category (i.e. container for ``products``) associated with the message
:type product_category: dict, None
:param products: The product in a product catalog associated with the message
:type products: dict, None
:param read_only: Indicates whether or not the message should be read-only or have replies/comments blocked
:type read_only: bool, None
:param topic: The root message of the conversation in which the message appears
:type topic: dict, None
:param status: The message status for messages where conversation.style is ``idea`` or ``contest``
.. caution:: This property is not returned if the message has the default ``Unspecified`` status
assigned. It will only be returned for ideas with a status of Completed or with a
custom status created in Community Admin.
:type status: dict, None
:param seo_title: The title of the message used for SEO purposes
:type seo_title: str, None
:param seo_description: A description of the message used for SEO purposes
:type seo_description: str, None
:param tags: The query to retrieve tags applied to the message
:type tags: dict, None
:param overwrite_tags: Determines if tags should overwrite any existing tags (where applicable) or if the tags
should be appended to the existing tags (default)
:type overwrite_tags: bool
:param ignore_non_string_tags: Determines if non-strings (excluding iterables) should be ignored rather than
converted to strings (``False`` by default)
:type ignore_non_string_tags: bool
:param teaser: The message teaser (used with blog articles)
:type teaser: str, None
:param attachments_to_add: The full path(s) to one or more attachments (e.g. ``path/to/file1.pdf``) to be
added to the message
:type attachments_to_add: str, tuple, list, set, None
:param attachments_to_remove: One or more attachments to remove from the message
.. note:: Each attachment should specify the attachment id of the attachment to
remove, which begins with ``m#_``. (e.g. ``m283_file1.pdf``)
:type attachments_to_remove: str, tuple, list, set, None
:param full_response: Defines if the full response should be returned instead of the outcome (``False`` by default)
.. caution:: This argument overwrites the ``return_id``, ``return_url``, ``return_api_url``
and ``return_http_code`` arguments.
:type full_response: bool, None
:param return_id: Indicates that the **Message ID** should be returned (``False`` by default)
:type return_id: bool, None
:param return_url: Indicates that the **Message URL** should be returned (``False`` by default)
:type return_url: bool, None
:param return_api_url: Indicates that the **API URL** of the message should be returned (``False`` by default)
:type return_api_url: bool, None
:param return_http_code: Indicates that the **HTTP status code** of the response should be returned
(``False`` by default)
:type return_http_code: bool, None
:param return_status: Determines if the **Status** of the API response should be returned by the function
:type return_status: bool, None
:param return_error_messages: Determines if the **Developer Response Message** (if any) associated with the
API response should be returned by the function
:type return_error_messages: bool, None
:param split_errors: Defines whether or not error messages should be merged when applicable
:type split_errors: bool
:param proxy_user_object: Instantiated :py:class:`khoros.objects.users.ImpersonatedUser` object to update the
message on behalf of a secondary user.
:type proxy_user_object: class[khoros.objects.users.ImpersonatedUser], None
:returns: Boolean value indicating a successful outcome (default) or the full API response
:raises: :py:exc:`TypeError`, :py:exc:`ValueError`, :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`,
:py:exc:`khoros.errors.exceptions.DataMismatchError`
"""
msg_id = _verify_message_id(msg_id, msg_url)
api_url = f"{khoros_object.core['v2_base']}/messages/{msg_id}"
payload = construct_payload(subject, body, node, node_id, node_url, canonical_url, context_id, context_url,
is_draft=is_draft, read_only=read_only, seo_title=seo_title, tags=tags, topic=topic,
seo_description=seo_description, teaser=teaser, cover_image=cover_image, labels=labels,
parent=parent, products=products, product_category=product_category, status=status,
moderation_status=moderation_status, attachments_to_add=attachments_to_add,
attachments_to_remove=attachments_to_remove, overwrite_tags=overwrite_tags,
ignore_non_string_tags=ignore_non_string_tags, action='update')
    multipart = bool(attachments_to_add)
if multipart:
payload = attachments.construct_multipart_payload(payload, attachments_to_add, 'update')
response = api.put_request_with_retries(api_url, payload, khoros_object=khoros_object, multipart=multipart,
proxy_user_object=proxy_user_object)
return api.deliver_v2_results(response, full_response, return_id, return_url, return_api_url, return_http_code,
return_status, return_error_messages, split_errors, khoros_object)
def _verify_message_id(_msg_id, _msg_url):
"""This function verifies that a message ID has been defined or can be using the message URL.
.. versionadded:: 2.8.0
:param _msg_id: The message ID associated with a message
:type _msg_id: str, int, None
:param _msg_url: The URL associated with a message
:type _msg_url: str, None
:returns: The message ID
:raises: :py:exc:`errors.exceptions.MissingRequiredDataError`,
:py:exc:`errors.exceptions.MessageTypeNotFoundError`
"""
if not any((_msg_id, _msg_url)):
raise errors.exceptions.MissingRequiredDataError("A message ID or URL must be defined when updating messages")
elif not _msg_id:
_msg_id = get_id_from_url(_msg_url)
return _msg_id
def _verify_required_fields(_node, _node_id, _node_url, _subject):
"""This function verifies that the required fields to create a message are satisfied.
.. versionchanged:: 2.8.0
Updated the if statement to leverage the :py:func:`isinstance` function.
.. versionadded:: 2.3.0
:param _node: A dictionary containing the ``id`` key and its associated value indicating the destination
:type _node: dict
:param _node_id: The ID of the node in which the message will be published
:type _node_id: str
:param _node_url: The URL of the node in which the message will be published
.. note:: This argument is necessary in the absence of the ``node`` and ``node_id`` arguments.
:type _node_url: str
:param _subject: The title or subject of the message
:type _subject: str
:returns: None
:raises: :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`
"""
_requirements_satisfied = True
if (not _node and not _node_id and not _node_url) or (_node is not None and not isinstance(_node, dict)) \
or not _subject:
_requirements_satisfied = False
elif _node and (not _node_id and not _node_url):
_requirements_satisfied = False if 'id' not in _node else True
if not _requirements_satisfied:
raise errors.exceptions.MissingRequiredDataError("A node and subject must be defined when creating messages")
return
def _add_moderation_status_to_payload(_payload, _moderation_status):
"""This function adds the moderation status field and value to the payload when applicable.
.. versionadded:: 2.8.0
:param _payload: The payload for the API call
:type _payload: dict
:param _moderation_status: The ``moderation_status`` field value
:type _moderation_status: str, None
:returns: The payload with the potentially added ``moderation_status`` key value pair
"""
_valid_options = ['unmoderated', 'approved', 'rejected', 'marked_undecided', 'marked_approved', 'marked_rejected']
if _moderation_status:
if not isinstance(_moderation_status, str) or _moderation_status not in _valid_options:
warnings.warn(f"The moderation status '{_moderation_status}' is not a valid option and will be ignored.",
RuntimeWarning)
else:
_payload['data']['moderation_status'] = _moderation_status
return _payload
def _add_tags_to_payload(_payload, _tags, _khoros_object=None, _msg_id=None, _overwrite_tags=False,
_ignore_non_strings=False):
"""This function adds tags to the payload for an API call against the *messages* collection.
:param _payload: The payload for the API call
:type _payload: dict
:param _tags: A list, tuple, set or string containing one or more tags to add to the message
:type _tags: list, tuple, set, str
:param _khoros_object: The core :py:class:`khoros.Khoros` object
.. note:: The core object is only necessary when providing a Message ID as it will be
needed to retrieve the existing tags from the message.
:type _khoros_object: class[khoros.Khoros], None
:param _msg_id: Message ID of an existing message so that its existing tags can be retrieved (optional)
:type _msg_id: str, int, None
:param _overwrite_tags: Determines if tags should overwrite any existing tags (where applicable) or if the tags
should be appended to the existing tags (default)
:type _overwrite_tags: bool
:param _ignore_non_strings: Determines if non-strings (excluding iterables) should be ignored rather than
converted to strings (``False`` by default)
:type _ignore_non_strings: bool
    :returns: The payload with tags included when relevant
"""
_formatted_tags = tags_module.structure_tags_for_message(_tags, khoros_object=_khoros_object, msg_id=_msg_id,
overwrite=_overwrite_tags,
ignore_non_strings=_ignore_non_strings)
_payload['data']['tags'] = _formatted_tags
return _payload
def _confirm_field_supplied(_fields_dict):
"""This function checks to ensure that at least one field has been enabled to retrieve.
.. versionadded:: 2.3.0
"""
_field_supplied = False
for _field_value in _fields_dict.values():
if _field_value[0]:
_field_supplied = True
break
if not _field_supplied:
raise errors.exceptions.MissingRequiredDataError("At least one field must be enabled to retrieve a response.")
return
def parse_v2_response(json_response, return_dict=False, status=False, response_msg=False, http_code=False,
message_id=False, message_url=False, message_api_uri=False, v2_base=''):
"""This function parses an API response for a message operation (e.g. creating a message) and returns parsed data.
.. deprecated:: 2.5.0
Use the :py:func:`khoros.api.parse_v2_response` function instead.
.. versionadded:: 2.3.0
:param json_response: The API response in JSON format
:type json_response: dict
:param return_dict: Defines if the parsed data should be returned within a dictionary
:type return_dict: bool
:param status: Defines if the **status** value should be returned
:type status: bool
:param response_msg: Defines if the **developer response** message should be returned
:type response_msg: bool
:param http_code: Defines if the **HTTP status code** should be returned
:type http_code: bool
:param message_id: Defines if the **message ID** should be returned
:type message_id: bool
:param message_url: Defines if the **message URL** should be returned
:type message_url: bool
    :param message_api_uri: Defines if the **message API URI** should be returned
:type message_api_uri: bool
:param v2_base: The base URL for the API v2
:type v2_base: str
:returns: A string, tuple or dictionary with the parsed data
:raises: :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`
"""
warnings.warn(f"This function is deprecated and 'khoros.api.parse_v2_response' should be used.", DeprecationWarning)
return api.parse_v2_response(json_response, return_dict, status, response_msg, http_code, message_id, message_url,
message_api_uri, v2_base)
def get_id_from_url(url):
"""This function retrieves the message ID from a given URL.
.. versionadded:: 2.4.0
:param url: The URL from which the ID will be parsed
:type url: str
:returns: The ID associated with the message in string format
:raises: :py:exc:`khoros.errors.exceptions.MessageTypeNotFoundError`
"""
for msg_type in MESSAGE_SEO_URLS.values():
if msg_type in url:
return (url.split(f'{msg_type}/')[1]).split('#')[0]
raise errors.exceptions.MessageTypeNotFoundError(url=url)
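# Example (hypothetical URL): a permalink such as
# 'https://community.example.com/t5/example-board/example-subject/td-p/12345#M678'
# contains the 'td-p' segment from MESSAGE_SEO_URLS, so get_id_from_url() returns '12345'.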
def is_read_only(khoros_object=None, msg_id=None, msg_url=None, api_response=None):
"""This function checks to see whether or not a message is read-only.
.. versionadded:: 2.8.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros], None
:param msg_id: The unique identifier for the message
:type msg_id: str, int, None
    :param msg_url: The URL of the message
:type msg_url: str, None
:param api_response: The JSON data from an API response
:type api_response: dict, None
:returns: Boolean value indicating whether or not the message is read-only
:raises: :py:exc:`errors.exceptions.MissingRequiredDataError`,
:py:exc:`errors.exceptions.MessageTypeNotFoundError`
"""
if api_response:
current_status = api_response['data']['read_only']
else:
errors.handlers.verify_core_object_present(khoros_object)
msg_id = _verify_message_id(msg_id, msg_url)
query = f'SELECT read_only FROM messages WHERE id = "{msg_id}"' # nosec
api_response = liql.perform_query(khoros_object, liql_query=query, verify_success=True)
current_status = api_response['data']['items'][0]['read_only']
return current_status
def set_read_only(khoros_object, enable=True, msg_id=None, msg_url=None, suppress_warnings=False):
"""This function sets (i.e. enables or disables) the read-only flag for a given message.
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param enable: Determines if the read-only flag should be enabled (``True`` by default)
:type enable: bool
:param msg_id: The unique identifier for the message
:type msg_id: str, int, None
:param msg_url: The URL for the message
:type msg_url: str, None
:param suppress_warnings: Determines whether or not warning messages should be suppressed (``False`` by default)
:type suppress_warnings: bool
:returns: None
:raises: :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`
"""
def _get_warn_msg(_msg_id, _status):
"""This function returns the appropriate warning message to use when applicable."""
return f"Read-only status is already {_status} for Message ID {_msg_id}"
msg_id = _verify_message_id(msg_id, msg_url)
current_status = is_read_only(khoros_object, msg_id)
warn_msg = None
if all((enable, current_status)):
warn_msg = _get_warn_msg(msg_id, 'enabled')
elif enable is False and current_status is False:
warn_msg = _get_warn_msg(msg_id, 'disabled')
if warn_msg and not suppress_warnings:
errors.handlers.eprint(warn_msg)
else:
result = update(khoros_object, msg_id, msg_url, read_only=enable, full_response=True)
if result['status'] == 'error':
errors.handlers.eprint(errors.handlers.get_error_from_json(result))
else:
new_status = is_read_only(api_response=result)
if new_status == current_status and not suppress_warnings:
warn_msg = f"The API call was successful but the read-only status for Message ID {msg_id} is " \
f"still {new_status}."
errors.handlers.eprint(warn_msg)
return
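# Example (hypothetical ID): the calls below would lock and then unlock replies on a message,
# assuming 'khoros' is an authenticated khoros.Khoros instance created elsewhere.
#     set_read_only(khoros, enable=True, msg_id=12345)
#     set_read_only(khoros, enable=False, msg_id=12345)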
def _get_required_user_mention_data(_khoros_object, _user_info, _user_id, _login):
"""This function retrieves the required data for constructing a user mention.
:param _khoros_object: The core :py:class:`khoros.Khoros` object
:type _khoros_object: class[khoros.Khoros]
:param _user_info: User information provided in a dictionary
:type _user_info: dict, None
:param _user_id: The User ID for the user
:type _user_id: str, int, None
:param _login: The username (i.e. login) for the user
:type _login: str, None
:returns: The User ID and username (i.e. login) for the user
:raises: :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`
"""
_missing_data_error = "A User ID or login must be supplied to construct an user @mention"
_info_fields = ['id', 'login']
if not any((_user_info, _user_id, _login)):
raise errors.exceptions.MissingRequiredDataError(_missing_data_error)
elif not _user_id and not _login:
if not any(_field in _info_fields for _field in _user_info):
raise errors.exceptions.MissingRequiredDataError(_missing_data_error)
else:
if 'id' in _user_info:
_user_id = _user_info.get('id')
if 'login' in _user_info:
_login = _user_info.get('login')
if not _user_id or not _login:
if not _khoros_object:
raise errors.exceptions.MissingAuthDataError()
if not _user_id:
_user_id = users.get_user_id(_khoros_object, login=_login)
elif not _login:
_login = users.get_login(_khoros_object, user_id=_user_id)
return _user_id, _login
def format_user_mention(khoros_object=None, user_info=None, user_id=None, login=None):
"""This function formats the ``<li-user>`` HTML tag for a user @mention.
.. versionadded:: 2.4.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
.. note:: This argument is necessary if only one of the user values (i.e. ``user_id`` or
``login``) are passed in the function, as a lookup will need to be performed to
define the missing value.
:type khoros_object: class[khoros.Khoros], None
:param user_info: A dictionary containing the ``'id'`` and/or ``'login'`` key(s) with the user information
.. note:: This argument is necessary if the User ID and/or Login are not explicitly passed
using the ``user_id`` and/or ``login`` function arguments.
:type user_info: dict, None
:param user_id: The unique user identifier (i.e. User ID) for the user
:type user_id: str, int, None
:param login: The login (i.e. username) for the user
:type login: str, None
:returns: The properly formatted ``<li-user>`` HTML tag in string format
:raises: :py:exc:`khoros.errors.exceptions.MissingAuthDataError`,
:py:exc:`khoros.errors.exceptions.MissingRequiredDataError`
"""
user_id, login = _get_required_user_mention_data(khoros_object, user_info, user_id, login)
mention_tag = f'<li-user uid="{user_id}" login="@{login}"></li-user>'
return mention_tag
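# Example (hypothetical values): format_user_mention(user_id=216, login='examplemember')
# returns '<li-user uid="216" login="@examplemember"></li-user>'.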
def _report_missing_id_and_retrieve(_content_id, _url):
"""This function displays a ``UserWarning`` message if needed and then retrieves the correct ID from the URL.
.. versionadded:: 2.4.0
:param _content_id: The missing or incorrect ID of the message
:type _content_id: str, int, None
:param _url: The full URL of the message
:type _url: str
:returns: The appropriate ID of the message where possible
:raises: :py:exc:`khoros.errors.exceptions.MessageTypeNotFoundError`
"""
if _content_id is not None:
warnings.warn(f"The given ID '{_content_id}' is not found in the URL {_url} and will be verified.",
UserWarning)
return get_id_from_url(_url)
def _check_for_bad_content_id(_content_id, _url):
"""This function confirms that a supplied Content ID is found within the provided URL.
.. versionadded:: 2.4.0
:param _content_id: The ID of the message
:type _content_id: str, int, None
:param _url: The full URL of the message
:type _url: str
:returns: The appropriate ID of the message where possible
:raises: :py:exc:`khoros.errors.exceptions.MessageTypeNotFoundError`
"""
if _content_id is None or str(_content_id) not in _url:
_content_id = _report_missing_id_and_retrieve(_content_id, _url)
return _content_id
def _get_required_content_mention_data(_khoros_object, _content_info, _content_id, _title, _url):
"""This function retrieves the required data to construct a content mention.
:param _khoros_object: The core :py:class:`khoros.Khoros` object
:type _khoros_object: class[khoros.Khoros]
:param _content_info: Information on the content within a dictionary
:type _content_info: dict, None
:param _content_id: The ID of the content
:type _content_id: str, int, None
:param _title: The title of the content
:type _title: str, None
:param _url: The URL of the content
:type _url: str, None
:returns: The ID, title and URL of the content
:raises: :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`
"""
_missing_data_error = "A title and URL must be supplied to construct an user @mention"
_content_info = {} if _content_info is None else _content_info
_info_fields = ['title', 'url']
_info_arguments = (_content_info, _content_id, _title, _url)
_required_fields_in_dict = all(_field in _content_info for _field in _info_fields)
_required_fields_in_args = all((_title, _url))
if not _required_fields_in_dict and not _required_fields_in_args:
raise errors.exceptions.MissingRequiredDataError(_missing_data_error)
elif _required_fields_in_dict:
_title = _content_info.get('title')
_url = _content_info.get('url')
_content_id = _content_info.get('id') if 'id' in _content_info else _content_id
else:
_content_id = _content_info.get('id') if not _content_id else _content_id
_content_id = _check_for_bad_content_id(_content_id, _url)
return _content_id, _title, _url
def format_content_mention(khoros_object=None, content_info=None, content_id=None, title=None, url=None):
"""This function formats the ``<li-message>`` HTML tag for a content @mention.
.. versionadded:: 2.4.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
.. note:: This argument is necessary if the URL (i.e. ``url`` argument) is not an absolute
URL, as the base community URL will need to be retrieved from the object.
:type khoros_object: class[khoros.Khoros], None
:param content_info: A dictionary containing the ``'title'`` and ``'url'`` key(s) (and optionally ``'id'``) with the content information
.. note:: This argument is necessary if the Title and URL are not explicitly passed
using the ``title`` and ``url`` function arguments.
:type content_info: dict, None
:param content_id: The Message ID (aka Content ID) associated with the content mention
.. note:: This is an optional argument as the ID can be retrieved from the URL.
:type content_id: str, int, None
:param title: The display title for the content mention (e.g. ``"Click Here"``)
:type title: str, None
:param url: The fully-qualified URL of the message being mentioned
:type url: str, None
:returns: The properly formatted ``<li-message>`` HTML tag in string format
:raises: :py:exc:`khoros.errors.exceptions.MessageTypeNotFoundError`,
:py:exc:`khoros.errors.exceptions.MissingRequiredDataError`,
:py:exc:`khoros.errors.exceptions.InvalidURLError`
"""
content_id, title, url = _get_required_content_mention_data(khoros_object, content_info, content_id, title, url)
if url.startswith('/t5'):
if not khoros_object:
raise errors.exceptions.MissingRequiredDataError('The core Khoros object is required when a '
'fully-qualified URL is not provided.')
url = f"{khoros_object.core['base_url']}{url}"
mention_tag = f'<li-message title="{title}" uid="{content_id}" url="{url}"></li-message>'
return mention_tag
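# A minimal usage sketch for format_content_mention above (illustrative only; the ID, title
# and URL are hypothetical):
#     format_content_mention(content_info={'id': '123', 'title': 'Example Post',
#                                          'url': 'https://community.example.com/t5/board/example-post/m-p/123'})
#     -> '<li-message title="Example Post" uid="123" url="https://community.example.com/t5/board/example-post/m-p/123"></li-message>'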
```
#### File: utils/tests/test_ssl_verify.py
```python
from . import resources
def test_default_core_object_setting():
"""This function tests to verify the ``ssl_verify`` setting is ``True`` by default.
.. versionadded:: 4.3.0
"""
khoros = resources.initialize_khoros_object()
assert khoros.core_settings.get('ssl_verify') is True # nosec
return
def test_core_object_with_param_setting():
"""This function tests to verify the ``ssl_verify`` setting is honored when explicitly defined.
.. versionadded:: 4.3.0
"""
defined_setting = {'ssl_verify': False}
khoros = resources.initialize_khoros_object(use_defined_settings=True, defined_settings=defined_setting,
append_to_default=True)
assert khoros.core_settings.get('ssl_verify') is False # nosec
def test_api_global_variable_assignment():
"""This function tests to verify that the ``ssl_verify_disabled`` global variable gets defined appropriately.
.. versionadded:: 4.3.0
"""
defined_setting = {'ssl_verify': False}
khoros = resources.initialize_khoros_object(use_defined_settings=True, defined_settings=defined_setting,
append_to_default=True)
assert api.ssl_verify_disabled is True
def test_api_should_verify_function():
"""This function tests to verify that the :py:func:`khoros.api.should_verify_tls` function works properly.
.. versionadded:: 4.3.0
"""
defined_setting = {'ssl_verify': False}
khoros = resources.initialize_khoros_object(use_defined_settings=True, defined_settings=defined_setting,
append_to_default=True)
assert api.should_verify_tls(khoros) is False
assert api.should_verify_tls() is False
# Import modules and initialize the core object
api, exceptions = resources.import_modules('khoros.api', 'khoros.errors.exceptions')
```
|
{
"source": "jeffsieu/gazpacho",
"score": 3
}
|
#### File: gazpacho/tests/test_get.py
```python
import json
import sys
from json.decoder import JSONDecodeError
from unittest.mock import patch
import pytest
from gazpacho.get import HTTPError, UrllibHTTPError, build_opener, get
def test_get(create_mock_responses):
title = "<title>Gazpacho - Wikipedia"
create_mock_responses(title, "application/text")
url = "https://en.wikipedia.org/wiki/Gazpacho"
content = get(url)
assert title in content
def test_get_invalid_content_type(create_mock_responses):
create_mock_responses("asd")
url = "https://en.wikipedia.org/wiki/Gazpacho"
with pytest.raises(JSONDecodeError):
get(url)
def test_get_headers(create_mock_responses):
UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:69.0) Gecko/20100101 Firefox/69.0"
headers = {"User-Agent": UA}
create_mock_responses(json.dumps({"headers": headers}))
url = "https://httpbin.org/headers"
content = get(url, headers=headers)
assert UA == content["headers"]["User-Agent"]
def test_get_multiple_headers(create_mock_responses):
url = "https://httpbin.org/headers"
headers = {"User-Agent": "Mozilla/5.0", "Accept-Encoding": "gzip"}
create_mock_responses(json.dumps({"headers": headers}))
content = get(url, headers=headers)
headers_set = set(headers.values())
response_set = set(content["headers"].values())
assert headers_set.intersection(response_set) == {"Mozilla/5.0", "gzip"}
def test_get_params(create_mock_responses):
params = {"foo": "bar", "bar": "baz"}
create_mock_responses(json.dumps({"args": params}))
url = "https://httpbin.org/anything"
content = get(url, params)
assert params == content["args"]
def test_HTTPError_404(create_mock_responses):
url = "https://httpstat.us/404"
_, mock_response = create_mock_responses("asd")
mock_response.headers.get_content_type.side_effect = UrllibHTTPError(
url, 404, "Not found", None, None
)
with pytest.raises(HTTPError):
get(url)
```
|
{
"source": "jeffsimp88/twitterclone",
"score": 2
}
|
#### File: twitterclone/tweet/models.py
```python
from django.db import models
from django.conf import settings
from django.utils import timezone
class Tweet(models.Model):
message = models.TextField(max_length=140)
date_posted = models.DateTimeField(default=timezone.now)
post_user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name = 'post_user'
)
def __str__(self):
return f'{self.message} | {self.post_user}'
```
|
{
"source": "jeffsiver/monorepo-builder",
"score": 3
}
|
#### File: monorepo-builder/monorepo_builder/console.py
```python
import click
def write_to_console(message, color=None, bold=False):
if color:
click.secho(message, fg=color, bold=bold)
elif bold:
click.secho(message, bold=True)
else:
click.echo(message)
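# A minimal usage sketch (illustrative only; "green" is a standard click color name):
#     write_to_console("Build complete", color="green", bold=True)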
```
|
{
"source": "Jeff-sjtu/Emoji2Sticker",
"score": 2
}
|
#### File: Emoji2Sticker/backend/ocr.py
```python
import httplib, urllib, base64, json
from jeff.translate import translate
###############################################
#### Update or verify the following values. ###
###############################################
# Replace the subscription_key string value with your valid subscription key.
subscription_key = '3b24d41162714af897e7b00e4b29566d'
# Replace or verify the region.
#
# You must use the same region in your REST API call as you used to obtain your subscription keys.
# For example, if you obtained your subscription keys from the westus region, replace
# "westcentralus" in the URI below with "westus".
#
# NOTE: Free trial subscription keys are generated in the westcentralus region, so if you are using
# a free trial subscription key, you should not need to change this region.
uri_base = 'westcentralus.api.cognitive.microsoft.com'
headers = {
# Request headers.
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.urlencode({
# Request parameters. The language setting "unk" means automatically detect the language.
'language': 'zh-Hans',
'detectOrientation': 'true',
})
def text_detection(url):
body = "{'url':'" + url + "'}"
# Execute the REST API call and get the response.
conn = httplib.HTTPSConnection('westcentralus.api.cognitive.microsoft.com')
conn.request("POST", "/vision/v1.0/ocr?%s" % params, body, headers)
response = conn.getresponse()
data = response.read()
# 'data' contains the JSON data. The following formats the JSON data for display.
parsed = json.loads(data)
# print ("Response:")
text = ''
if u'regions' in parsed:
for i in parsed[u'regions']:
message = ''
for j in i[u'lines']:
for k in j[u'words']:
tex = k['text']
if (tex >= u'\u4E00') and (tex <= u'\u9FA5'):  # keep CJK Unified Ideographs (inclusive range)
t = tex.encode('utf-8')
message += t
else:
message += ' '
text += message
text += ' '
else:
print(parsed)
# print (parsed)
# print (json.dumps(parsed, sort_keys=True, indent=2))
conn.close()
text = text.strip()
# print(text)
if text:
text = translate(text)[0].replace(',', '').replace('.', '')
return text
```
|
{
"source": "jeffskinnerbox/computer-vision",
"score": 2
}
|
#### File: src/massmutual-people-counter/tracemess.py
```python
from cv2 import __version__
import uuid
import json
import ts_dweepy # https://pypi.python.org/pypi/dweepy/
import time
class TraceMess:
def __init__(self, src):
self.run_stamp = {
"mess-type": "EXEC",
"mess-format": "0.0.2",
"run-id": str(uuid.uuid4()),
"run-time": time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
"run-status": "start",
"run-platform": "Desktop-Jupyter",
"run-source": src,
"version": {
"algorithm": "0.0.3",
"cv2": __version__
}
}
print(json.dumps(self.run_stamp))
def start(self):
self.run_stamp["run-status"] = "start"
self.run_stamp["run-time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) #noqa
print(json.dumps(self.run_stamp))
def stop(self):
self.run_stamp["run-status"] = "stop"
self.run_stamp["run-time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) #noqa
print(json.dumps(self.run_stamp))
def error(self, mess):
print(json.dumps({"mess-type": "ERROR","run-id": self.run_stamp["run-id"], "mess-text": mess})) #noqa
def warning(self, mess):
print(json.dumps({"mess-type": "WARNING","run-id": self.run_stamp["run-id"], "mess-text": mess})) #noqa
def info(self, mess):
print(json.dumps({"mess-type": "INFO","run-id": self.run_stamp["run-id"], "mess-text": mess})) #noqa
def feature(self, mess):
ts_dweepy.dweet_for(self.run_stamp["run-platform"], {"mess-type": "FEATURE","run-id": self.run_stamp["run-id"], "mess-text": mess}) #noqa
print(json.dumps({"mess-type": "FEATURE","run-id": self.run_stamp["run-id"], "mess-text": mess})) #noqa
```
|
{
"source": "jeffskwang/Landscape_Evolution_Model_RCM",
"score": 2
}
|
#### File: Landscape_Evolution_Model_RCM/modules/F_forward.py
```python
import importlib
import sys
parameters = importlib.import_module(sys.argv[1])
globals().update(parameters.__dict__)
def f_forward(eta_old,eta_temp,eta_new,discharge,slope,uplift,precipitation,incision,diffusion,lateral_incision_cumulative,lateral_incision_threshold,direction):
for x in xrange(1,cellsx+1):
eta_new[x][1] = eta_old[x][1]
eta_new[x][cellsy] = eta_old[x][cellsy]
eta_temp[x][1] = eta_old[x][1]
eta_temp[x][cellsy] = eta_old[x][cellsy]
for y in xrange(1,cellsy+1):
eta_new[1][y] = eta_old[1][y]
eta_new[cellsx][y] = eta_old[cellsx][y]
eta_temp[1][y] = eta_old[1][y]
eta_temp[cellsx][y] = eta_old[cellsx][y]
for x in xrange(x_lower,x_upper):
for y in xrange(y_lower,y_upper):
diffusion[x][y]= D / (dx * dy)* \
((eta_old[x-1][y]-2.*eta_old[x][y]+eta_old[x+1][y])+ \
(eta_old[x][y-1]-2.*eta_old[x][y]+eta_old[x][y+1]))
if diffusion[x][y] > 0.0 and diffusion_deposition == 0:
diffusion[x][y] = 0.0
incision[x][y] = K *(discharge[x][y]**m)*(slope[x][y]**n)
eta_new[x][y] = eta_old[x][y] + dt * (uplift[x][y] + diffusion[x][y] - incision[x][y])
eta_temp[x][y] = eta_new[x][y]
if lateral_incision_boolean == 1:
for x in xrange(x_lower,x_upper):
for y in xrange(y_lower,y_upper):
if lateral_incision_cumulative[x][y] > lateral_incision_threshold[x][y]:
i = direction[x][y]
if i != -9999:
xloc = x + xn[i]
yloc = y + yn[i]
if BC[0] == 2 and BC[1] == 2:
if yloc == cellsy+1:
yloc = 1
elif yloc == 0:
yloc = cellsy
elif BC[2] == 2 and BC[3] == 2:
if xloc == cellsx+1:
xloc = 1
elif xloc == 0:
xloc = cellsx
lateral_incision_cumulative[x][y] = 0.0
eta_new[x][y] = eta_temp[xloc][yloc] + dx * hole_adjustment
return eta_new,incision,diffusion,lateral_incision_cumulative
```
#### File: Landscape_Evolution_Model_RCM/modules/F_lateral.py
```python
import importlib
import sys
import random
parameters = importlib.import_module(sys.argv[1])
globals().update(parameters.__dict__)
def f_lateral(discharge,lateral_incision,lateral_incision_cumulative,area,slope,direction,lateral_incision_threshold,eta_old):
for x in xrange(x_lower,x_upper):
for y in xrange(y_lower,y_upper):
#the cells at play
x1 = x
y1 = y
i1 = direction[x1][y1]
if y1 == cellsy and BC[0] == 1:
bingo = 1
elif y1 == 1 and BC[1] == 1:
bingo = 1
elif x1 == 1 and BC[2] == 1:
bingo = 1
elif x1 == cellsx and BC[3] == 1:
bingo = 1
elif i1 == -9999:
bingo = 1
else:
x2 = x1 + xn[i1]
y2 = y1 + yn[i1]
if BC[0] == 2 and BC[1] == 2:
if y2 == cellsy+1:
y2 = 1
elif y2 == 0:
y2 = cellsy
elif BC[2] == 2 and BC[3] == 2:
if x2 == cellsx+1:
x2 = 1
elif x2 == 0:
x2 = cellsx
lateral_incision_threshold[x1][y1] = (eta_old[x1][y1] - eta_old[x2][y2]) * dx * dy
i2 = direction[x2][y2]
if y2 == cellsy and BC[0] == 1:
bingo = 1
elif y2 == 1 and BC[1] == 1:
bingo = 1
elif x2 == 1 and BC[2] == 1:
bingo = 1
elif x2 == cellsx and BC[3] == 1:
bingo = 1
elif i2 == -9999:
bingo = 1
else:
x3 = x2 + xn[i2]
y3 = y2 + yn[i2]
if BC[0] == 2 and BC[1] == 2:
if y3 == cellsy+1:
y3 = 1
elif y3 == 0:
y3 = cellsy
elif BC[2] == 2 and BC[3] == 2:
if x3 == cellsx+1:
x3 = 1
elif x3 == 0:
x3 = cellsx
#lateral node location
curve = str(i1) + str(i2)
lateral_node_direction = lateral_nodes[curve][int(0.5 + random.random())]
xlat = x2 + xn[lateral_node_direction]
ylat = y2 + yn[lateral_node_direction]
if BC[0] == 2 and BC[1] == 2:
if ylat == cellsy+1:
ylat = 1
elif ylat == 0:
ylat = cellsy
elif BC[2] == 2 and BC[3] == 2:
if xlat == cellsx+1:
xlat = 1
elif xlat == 0:
xlat = cellsx
if ylat == cellsy and BC[0] == 1:
bingo = 1
elif ylat == 1 and BC[1] == 1:
bingo = 1
elif xlat == 1 and BC[2] == 1:
bingo = 1
elif xlat == cellsx and BC[3] == 1:
bingo = 1
else:
inverse_radius_curvature = lateral_nodes[curve][2]
lateral_incision[xlat][ylat] = Kl *(discharge[x2][y2]**m_l)*(slope[x2][y2]**n_l) * inverse_radius_curvature * (discharge_constant * discharge[x2][y2] ** discharge_exponent * dx)
lateral_incision_cumulative[xlat][ylat] += dt * Kl *(discharge[x2][y2]**m_l)*(slope[x2][y2]**n_l) * inverse_radius_curvature * (discharge_constant * discharge[x2][y2] ** discharge_exponent * dx)
## lateral_incision[xlat][ylat] = Kl *(discharge[x1][y1]**m_l)*(slope[x1][y1]**n_l) * inverse_radius_curvature * (discharge_constant * discharge[x1][y1] ** discharge_exponent * dx)
## lateral_incision_cumulative[xlat][ylat] += dt * Kl *(discharge[x1][y1]**m_l)*(slope[x1][y1]**n_l) * inverse_radius_curvature * (discharge_constant * discharge[x1][y1] ** discharge_exponent * dx)
return lateral_incision,lateral_incision_cumulative, lateral_incision_threshold
```
|
{
"source": "jeffskwang/LEM-wLE",
"score": 3
}
|
#### File: LEM_wLE/modules/F_direction.py
```python
import importlib
import sys
parameters = importlib.import_module(sys.argv[1])
globals().update(parameters.__dict__)
#This function determines the flow direction for each cell using the D8 algorithm.
def f_direction(eta,direction,slope,hole):
f_slope = [0.0 for i in xrange(0,8)]
hole[0] = 0
for x in xrange(x_lower,x_upper):
for y in xrange(y_lower,y_upper):
for i in xrange (0,8):
f_slope[i]=(eta[x][y]-eta[x+xn[i]][y+yn[i]])/(dn[i])
if max(f_slope) <= 0.0:
slope[x][y] = 0.0
direction[x][y] = -9999
hole[0] = 1
else:
max_slope = 0.0
max_slope_int = -9999
for i in xrange (0,8):
if max_slope < f_slope[i]:
max_slope_int = i
max_slope = f_slope[i]
direction[x][y] = max_slope_int
slope[x][y] = max_slope
return direction, slope, hole
```
#### File: LEM_wLE/modules/F_lateral.py
```python
import importlib
import sys
import random
parameters = importlib.import_module(sys.argv[1])
globals().update(parameters.__dict__)
#These functions implement Langston and Tucker (2018) lateral incision algorithm.
# o | o | x1
# --------------
# o | x2 | o
# --------------
# xlat | x3 | o
#This function determines where flow goes (x_out, y_out) given a coordinate (x_in, y_in) and drainage direction (d_in)
def direction_single(x_in,y_in,d_in):
#do not process if located in a open boundary cell
proceed = 1
x_out, y_out = -9999, -9999
if y_in == cellsy and BC[0] == 1:
proceed = 0
elif y_in == 1 and BC[1] == 1:
proceed = 0
elif x_in == 1 and BC[2] == 1:
proceed = 0
elif x_in == cellsx and BC[3] == 1:
proceed = 0
elif d_in == -9999:
proceed = 0
else:
x_out = x_in + xn[d_in]
y_out = y_in + yn[d_in]
if BC[0] == 2 and BC[1] == 2:
if y_out == cellsy+1:
y_out = 1
elif y_out == 0:
y_out = cellsy
elif BC[2] == 2 and BC[3] == 2:
if x_out == cellsx+1:
x_out = 1
elif x_out == 0:
x_out = cellsx
return x_out, y_out, proceed
def f_lateral(discharge,lateral_incision,lateral_incision_cumulative,area,slope,direction,lateral_incision_threshold_total,lateral_discharge,eta_old,eta_new):
#initiate lateral incision with zeros
for x in xrange(x_lower,x_upper):
for y in xrange(y_lower,y_upper):
lateral_incision[x][y] = 0.0
for x in xrange(x_lower,x_upper):
for y in xrange(y_lower,y_upper):
#first find the maximum incoming discharge for each cell
d_in_max = - 9999
discharge_in_max = 0.0
for i in xrange (0,8):
x_in_max,y_in_max,proceed = direction_single(x,y,i)
if proceed == 1 and direction[x_in_max][y_in_max] == dop[i]:#checks that neighbor node flows into primary node
if discharge_in_max < discharge[x_in_max][y_in_max]:
discharge_in_max = discharge[x_in_max][y_in_max]
d_in_max = dop[i]
elif discharge_in_max == discharge[x_in_max][y_in_max]: #if there are two incoming discharges that are the same, choose one randomly
if random.random() < 0.5:
d_in_max = dop[i]
if d_in_max != - 9999:
x_out,y_out,proceed = direction_single(x,y,direction[x][y])
if proceed == 1:
curve = str(d_in_max) + str(direction[x][y])
if curve in lateral_nodes:
lateral_node_direction = lateral_nodes[curve][int(0.5 + random.random())] #find lateral node
xlat,ylat,proceed = direction_single(x,y,lateral_node_direction) #find lateral node coordinates
if proceed == 1 and eta_old[xlat][ylat] > eta_old[x][y]:
flow_depth = discharge_constant * discharge[x][y] ** discharge_exponent #calculate flow depth
inverse_radius_curvature = lateral_nodes[curve][2] #find the inverse radius of curvature
if discharge[x][y] < lateral_discharge[xlat][ylat]: # discharge that erodes the lateral node is associated with the max discharge
lateral_discharge[xlat][ylat] = discharge[x][y]
lateral_incision_threshold_total[xlat][ylat] = (eta_old[xlat][ylat] - eta_old[x_out][y_out]) * dx * dy #the total incision required to erode the bank is equal to the bank height times dx and dy
lateral_incision[xlat][ylat] += Kl *(area[x][y]**m_l)*(slope[x][y]**n_l) * inverse_radius_curvature * (flow_depth * dx) #lateral incision rate
lateral_incision_cumulative[xlat][ylat] += dt * Kl *(area[x][y]**m_l)*(slope[x][y]**n_l) * inverse_radius_curvature * (flow_depth * dx) #cumulative amount of lateral incision
#if the cumulative lateral incision >= the amount needs to remove the lateral cell, incision occurs
for x in xrange(x_lower,x_upper):
for y in xrange(y_lower,y_upper):
if lateral_incision_cumulative[x][y] > 0.0:
if lateral_incision_cumulative[x][y] > lateral_incision_threshold_total[x][y]:
eta_new[x][y] -= lateral_incision_threshold_total[x][y] / dx / dy #erode the bank material
lateral_incision_cumulative[x][y] = 0.0 #reset the cumulative amount
return lateral_incision,lateral_incision_cumulative,lateral_incision_threshold_total,lateral_discharge,eta_new
```
|
{
"source": "jeffskwang/SOC_LEM",
"score": 2
}
|
#### File: SOC_LEM/SOC_LEM/SOC_LEM_5.1.py
```python
import numpy as np
import os
import importlib
import shutil
import time
import sys
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
#####HOUSEKEEPING#####
#import parameters
code_folder = os.getcwd()
sys.path.append(code_folder +'\\drivers')
sys.path.append(code_folder +'\\utilities')
parameters = importlib.import_module(sys.argv[1])
globals().update(parameters.__dict__)
#make output folder
if os.path.isdir(results_folder+'\\results'):
shutil.rmtree(results_folder+'\\results')
time.sleep(3)
os.makedirs(results_folder+'\\results')
os.makedirs(results_folder+'\\results\\input')
shutil.copyfile(code_folder +'\\drivers\\'+sys.argv[1]+'.py',results_folder+'\\results\\input\\'+sys.argv[1]+'.py')
#make padded inputs
from pad_rasters import*
pad_rasters(ini_file,pad_mode)
for real_data_file_temp in real_data_file:
pad_rasters(real_data_file_temp,pad_mode)
for ndvi_data_file_temp in ndvi_data_file:
pad_rasters(ndvi_data_file_temp,pad_mode)
#load landlab components
from landlab.components import LinearDiffuser
from landlab import RasterModelGrid
from landlab.utils import structured_grid
from landlab.io import read_esri_ascii
from landlab.components import FlowAccumulator, DepressionFinderAndRouter
#load initial condition
(grid, eta) = read_esri_ascii(results_folder+'\\results\\input\\padded_'+os.path.basename(ini_file), name='topographic__elevation')
grid.set_nodata_nodes_to_closed(eta, -9999.)
grid.set_fixed_value_boundaries_at_grid_edges(True,True,True,True)
if flow_method == 'D4':
fa = FlowAccumulator(grid,'topographic__elevation',flow_director='FlowDirectorSteepest',depression_finder=DepressionFinderAndRouter)
elif flow_method == 'D8':
fa = FlowAccumulator(grid,'topographic__elevation',flow_director='FlowDirectorD8')
elif flow_method == 'Dinf':
fa = FlowAccumulator(grid,'topographic__elevation',flow_director='FlowDirectorDINF')
nrows = grid.number_of_node_rows
ncols = grid.number_of_node_columns
#define arrays
SOC_La = grid.add_zeros('node','SOC_La')
SOC_transfer = grid.add_zeros('node','SOC_transfer')
dqda = grid.add_zeros('node','dqda')
nz = int(2.0 * Z / dz)
SOC_z = np.zeros((nrows,ncols,nz))
dz_ini = np.zeros((nrows,ncols,nz))
z_coordinates = np.zeros((nrows,ncols,nz))
#grid size and number of cells
dx = grid.dx
dy = grid.dy
nt = int(T / dt)
nt_plot = int(dt_plot/dt)
three_D_nt_plot = int(three_D_dt_plot/dt)
#output files
data_elevation=[]
data_soc=[]
data_area=[]
#timer
start_time = time.time()
#########################
#####FUNCTIONS#####
def curv(eta,curvature,max_curvature):
for i in range (1,nrows-1):
for j in range (1,ncols-1):
curvature[i,j] = (eta[i-1,j] - 2.0 * eta[i,j] + eta[i+1,j]) / dx / dx + (eta[i,j-1] - 2.0 * eta[i,j] + eta[i,j+1]) / dy / dy
if curvature[i,j] > max_curvature:
curvature[i,j] = max_curvature
for i in range (0,nrows):
curvature[i,0] = 0.0
curvature[i,ncols-1] = 0.0
for j in range (0,ncols):
curvature[0,j] = 0.0
curvature[nrows-1,j] = 0.0
return curvature
def IC(eta, dz_ini, SOC_z, SOC_La):
eta_gauss = np.zeros((nrows,ncols))
#copy elevation
eta_temp_long = eta.copy()
#convert -9999 to numpy NaNs
eta_temp_long[eta_temp_long==-9999]=np.nan
#restructure into a 2D array
eta_temp = eta_temp_long.reshape(nrows,ncols)
#astropy Gaussian (fills NaNs)
kernel = Gaussian2DKernel(sigma_value/dx)
eta_gauss = convolve(eta_temp, kernel,boundary='extend')
#calculate curvature
max_curvature = 0.01 #stops negative soci profiles (especially in ditches)
curvature = np.zeros((nrows,ncols))
curvature = curv(eta_gauss,curvature,max_curvature)
curvature[np.isnan(eta_temp)]=np.nan
for i in range(0,nrows):
for j in range(0,ncols):
dz_ini[i,j,:] = np.linspace(-Z + dz / 2.0, Z - dz / 2.0, nz)
B_SOCI = B_SOCI_int + curvature[i,j] * B_SOCI_slope + B_err * np.sqrt(B_MSE* (1./B_n + ((curvature[i,j] -B_average)**2.0)/B_sum_resid))
C_SOCI = C_SOCI_int + curvature[i,j] * C_SOCI_slope + C_err * np.sqrt(C_MSE* (1./C_n + ((curvature[i,j] -C_average)**2.0)/C_sum_resid))
SOC_z[i,j,0:int(nz/2)] = A_SOCI + B_SOCI * np.exp(dz_ini[i,j,0:int(nz/2)] * C_SOCI)
avg_count = 0
for k in range(int(nz/2 - La / dz),int(nz/2)):
avg_count += 1
SOC_La.reshape(nrows,ncols)[i,j] += SOC_z[i,j,k]
SOC_La.reshape(nrows,ncols)[i,j] /= float(avg_count)
return dz_ini, SOC_z, SOC_La
def boundary_conditions(eta):
noflow = grid.map_value_at_min_node_to_link(eta,eta)
noflow[noflow!=-9999] = 1.0
noflow[noflow==-9999] = 0.0
return noflow
def soil_and_SOC_transport(eta,SOC_La,dqda,noflow):
dzdx = grid.calc_grad_at_link(eta)
SOC_La_uphill = grid.map_value_at_max_node_to_link(eta,SOC_La)
q = - D * dzdx * noflow #q is a link
qc = q * SOC_La_uphill * noflow
dqda = grid.calc_flux_div_at_node(q)
dqcda = grid.calc_flux_div_at_node(qc)
if flow_method == 'D4' or flow_method == 'D8':
fa.run_one_step()
slope = grid.at_node['topographic__steepest_slope']
area = grid.at_node['drainage_area']
receiver = grid.at_node['flow__receiver_node']
noflow_sheet = eta[receiver]
noflow_sheet[noflow_sheet!=-9999] = 1.0
noflow_sheet[noflow_sheet==-9999] = 0.0
qs = K * slope ** 1.4 * area ** 1.5 * noflow_sheet
#water
dqda[receiver] -= qs/dx
dqda += qs/dx
dqcda[receiver] -= qs*SOC_La/dx
dqcda += qs*SOC_La/dx
elif flow_method == 'Dinf':
fa.run_one_step()
slope = grid.at_node['topographic__steepest_slope']
area = grid.at_node['drainage_area']
area_proportion = grid.at_node['flow__receiver_proportions']
receiver = grid.at_node['flow__receiver_node']
noflow_sheet = eta[receiver]
noflow_sheet[noflow_sheet!=-9999] = 1.0
noflow_sheet[noflow_sheet==-9999] = 0.0
slope[slope<0.0] = 0.0
qs1 = K * slope[:,0] ** 1.4 * (area * area_proportion[:,0]) ** 1.5 * np.min(noflow_sheet,axis=1)
qs2 = K * slope[:,1] ** 1.4 * (area * area_proportion[:,1]) ** 1.5 * np.min(noflow_sheet,axis=1)
#water
dqda[receiver[:,0]] -= qs1/dx
dqda[receiver[:,1]] -= qs2/dx
dqda += (qs1 + qs2)/dx
dqcda[receiver[:,0]] -= qs1*SOC_La/dx
dqcda[receiver[:,1]] -= qs2*SOC_La/dx
dqcda += (qs1 + qs2)*SOC_La/dx
return dqda,dqcda
def find_SOC_cell(interface_z_old,interface_z_new,z_coor,SOC):
index_locat_old = (np.abs(z_coor - interface_z_old)).argmin()
index_locat_new = (np.abs(z_coor - interface_z_new)).argmin()
halfway_point = 0.5 * z_coor[index_locat_old] + 0.5 * z_coor[index_locat_new]
SOC_interface = (SOC[index_locat_old] * (interface_z_old - halfway_point) + SOC[index_locat_new] * (halfway_point - interface_z_new)) / (interface_z_old - interface_z_new)
return SOC_interface
def find_top_cell_active_layer(interface_z,z_coor,SOC):
top_cell_active_layer = (np.abs(z_coor - interface_z + dz /2.)).argmin()
return top_cell_active_layer
def find_bottom_cell_active_layer(interface_z,z_coor,SOC):
bottom_cell_active_layer = (np.abs(z_coor - interface_z)).argmin()
return bottom_cell_active_layer
def SOC_transfer_function(eta_old,eta_ini,dzdt,SOC_La,SOC_transfer,SOC_z):
interface = eta_old - eta_ini - La
interface_new = eta_old + dzdt * dt - eta_ini - La
for i in range(0,nrows):
for j in range(0,ncols):
if dzdt.reshape(nrows,ncols)[i,j] < 0.0:
SOC_transfer.reshape(nrows,ncols)[i,j] = find_SOC_cell(interface.reshape(nrows,ncols)[i,j],interface_new.reshape(nrows,ncols)[i,j],dz_ini[i,j,:],SOC_z[i,j,:])
elif dzdt.reshape(nrows,ncols)[i,j] > 0.0:
SOC_transfer.reshape(nrows,ncols)[i,j] = SOC_La.reshape(nrows,ncols)[i,j]
return SOC_transfer
def SOC_profile_update(eta,eta_ini,dzdt,SOC_La,SOC_z):
interface = eta - eta_ini - La
for i in range(0,nrows):
for j in range(0,ncols):
top_cell_active_layer = find_top_cell_active_layer(interface.reshape(nrows,ncols)[i,j]+La,dz_ini[i,j,:],SOC_z[i,j,:])
bottom_cell_active_layer = find_bottom_cell_active_layer(interface.reshape(nrows,ncols)[i,j],dz_ini[i,j,:],SOC_z[i,j,:])
SOC_z[i,j,top_cell_active_layer+1:] = 0.0
SOC_z[i,j,bottom_cell_active_layer+1:top_cell_active_layer+1]=SOC_La.reshape(nrows,ncols)[i,j]
if dzdt.reshape(nrows,ncols)[i,j] > 0.0:
dz_interface_old = (interface.reshape(nrows,ncols)[i,j] - dt * dzdt.reshape(nrows,ncols)[i,j]) - dz_ini[i,j,bottom_cell_active_layer] + dz / 2.0
dz_interface_new = interface.reshape(nrows,ncols)[i,j] - dz_ini[i,j,bottom_cell_active_layer] + dz / 2.0
SOC_z[i,j,bottom_cell_active_layer] = (SOC_z[i,j,bottom_cell_active_layer] * dz_interface_old + dt * dzdt.reshape(nrows,ncols)[i,j] * SOC_La.reshape(nrows,ncols)[i,j]) / (dz_interface_new)
return SOC_z
##### LOOP START #####
noflow = boundary_conditions(eta)
dz_ini, SOC_z, SOC_La = IC(eta, dz_ini, SOC_z, SOC_La)
eta_ini = eta.copy()
for t in range(0,nt + 1):
## print(t)
## print (t,np.argmax(SOC_La.reshape(nrows,ncols)),np.max(SOC_La.reshape(nrows,ncols)),(nrows,ncols))
if t%nt_plot == 0:
print ('Time = ' + str(t * dt) + '; ' + str(int(float(t)*dt/T*1000.)/10.) + '% done')
#Append the new data
data_elevation.append(grid.at_node['topographic__elevation'].copy())
data_soc.append(grid.at_node['SOC_La'].copy())
if flow_method != 'noflow':
data_area.append(grid.at_node['drainage_area'].copy())
#Save the files
np.save(results_folder+'\\results\\' + 'elevation', data_elevation)
np.save(results_folder+'\\results\\' + 'soci', data_soc)
if flow_method != 'noflow':
np.save(results_folder+'\\results\\' + 'area', data_area)
if t%three_D_nt_plot == 0:
np.save(results_folder+'\\results\\' +'3D_SOC_' + '%06d' % + int(t*dt) +'yrs.npy',SOC_z)
np.save(results_folder+ '\\results\\' +'3D_surface_' + '%06d' % + int(t*dt) +'yrs.npy',eta.reshape(nrows,ncols))
eta_old = eta.copy()
dqda,dqcda = soil_and_SOC_transport(eta,SOC_La,dqda,noflow)
eta[grid.core_nodes] += dt *(- dqda[grid.core_nodes])
dzdt = (eta - eta_old)/dt
SOC_transfer = SOC_transfer_function(eta_old,eta_ini,dzdt,SOC_La,SOC_transfer,SOC_z)
SOC_La[grid.core_nodes] += dt/La * (SOC_transfer[grid.core_nodes] * dqda[grid.core_nodes] - dqcda[grid.core_nodes])
SOC_z = SOC_profile_update(eta,eta_ini,dzdt,SOC_La,SOC_z)
#end time
stop_time = time.time()
print (str(round((stop_time -start_time )/60.,1))+' mins')
```
|
{
"source": "jeffsmohan/words",
"score": 2
}
|
#### File: corpora/gutenberg/gutenberg_tools.py
```python
import argparse
import collections
import gzip
import itertools
import os
import re
from contextlib import closing
from typing import List
import tqdm
from gutenberg import Error as GutenbergError
from gutenberg._domain_model.types import validate_etextno
from gutenberg.acquire import get_metadata_cache, load_etext
from gutenberg.acquire.text import _TEXT_CACHE
from gutenberg.query import get_etexts
# Mirrors: https://www.gutenberg.org/MIRRORS.ALL
MIRRORS = [
"http://www.mirrorservice.org/sites/ftp.ibiblio.org/pub/docs/books/gutenberg/",
"http://eremita.di.uminho.pt/gutenberg/",
"http://mirror.csclub.uwaterloo.ca/gutenberg/",
"http://www.gutenberg.org/dirs/",
"http://mirrors.xmission.com/gutenberg/",
"https://gutenberg.pglaf.org/",
"http://aleph.gutenberg.org/",
"http://gutenberg.readingroo.ms/",
]
WORD_IGNORE_PATTERN = r"[^A-Z]"
MAX_WORD_COUNT_LENGTH = 500_000
PROCESS_CHUNK_SIZE = 100
def prime_query_cache(args: argparse.Namespace) -> None:
"""
Primes the Project Gutenberg metadata cache so future queries are fast.
Note that this can take ~18 hours on a standard laptop, and is not required
if you're only doing a few simple queries.
"""
if not args.quiet:
print("Populating Gutenberg cache. This may take a few hours...")
cache = get_metadata_cache()
cache.populate()
if not args.quiet:
print("Done!")
def prime_text_cache(args: argparse.Namespace) -> None:
"""
Primes the Project Gutenberg text cache so text retrieval is entirely local.
This will download all Gutenberg book texts onto your local machine, which
will take many hours and ~10-20GB.
"""
if not args.quiet:
print("Downloading Project Gutenberg book texts...")
etexts = get_etexts("language", args.language)
# Cycle through mirrors so as not to overload anyone's servers and get rate-limited
etexts_with_mirrors = list(zip(etexts, itertools.cycle(MIRRORS)))
etexts_iter = (
tqdm.tqdm(etexts_with_mirrors) if not args.quiet else etexts_with_mirrors
)
success_count = 0
total_count = 0
try:
for etext, mirror in etexts_iter:
total_count += 1
try:
load_etext(etext, mirror=mirror)
success_count += 1
except GutenbergError as e:
if not args.quiet:
print(f"Failure (mirror: {mirror}) ", e)
continue
except KeyboardInterrupt:
pass
except Exception:
print("Error with mirror: ", mirror, etext)
raise
if not args.quiet:
print(f"{success_count} / {total_count} books downloaded to cache")
print("Done!")
def load_etext_from_cache(etextno):
"""Load an etext only if it's already cached."""
etextno = validate_etextno(etextno)
cached = os.path.join(_TEXT_CACHE, "{}.txt.gz".format(etextno))
if not os.path.exists(cached):
text = ""
else:
with closing(gzip.open(cached, "r")) as cache:
text = cache.read().decode("utf-8")
return text
def count_words(args: argparse.Namespace) -> None:
"""Count the words in all Gutenberg books for a given language."""
# Pull the list of book IDs
if not args.quiet:
print("Processing Project Gutenberg books...")
etexts = get_etexts("language", args.language)
etexts_iter = tqdm.tqdm(list(etexts)) if not args.quiet else etexts
# Load each book and count the words
word_counts = collections.Counter()
etexts = []
failed_etexts = []
for i, etext in enumerate(etexts_iter):
try:
etexts.append(load_etext_from_cache(etext))
except GutenbergError as e:
failed_etexts.append(etext)
print("Failure: ", e)
continue
# For efficiency, only periodically turn the texts into word counts
if i % PROCESS_CHUNK_SIZE == 0:
word_counts += _count_words_in_etexts(etexts)
etexts = []
# Also trim the least common words, since they're usually
# gibberish and it's helpful to keep memory pressure down
word_counts = collections.Counter(
dict(word_counts.most_common(MAX_WORD_COUNT_LENGTH))
)
word_counts += _count_words_in_etexts(etexts)
del word_counts[""]
# Output the word counts to a file
if not args.quiet:
print(
f"Failed to download {len(failed_etexts)} books. (A few of these are "
"normal, as some books have no text.)"
)
print(f'--- Failed: {", ".join(str(etext) for etext in failed_etexts)}')
print("Writing word counts to disk...")
_output_word_counts(word_counts, args.output)
if not args.quiet:
print(f"Done! See word counts in {args.output}.")
def _count_words_in_etexts(etexts: List[str]) -> collections.Counter:
"""Return a Counter with the word counts from the given text."""
return collections.Counter(
re.sub(WORD_IGNORE_PATTERN, "", word.upper())
for word in " ".join(etexts).split()
)
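# Illustrative behaviour of the helper above (hypothetical input): words are upper-cased and
# stripped of non A-Z characters before counting, e.g.
#     _count_words_in_etexts(["It's a test, a TEST."])  ->  {'A': 2, 'TEST': 2, 'ITS': 1}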
def _output_word_counts(word_counts: collections.Counter, output_file: str) -> None:
"""Output the list of most common words to the output file."""
with open(output_file, "w") as f:
for word, count in word_counts.most_common():
f.write(f"{word} {count}\n")
if __name__ == "__main__":
SUBCOMMANDS = {
"prime_query_cache": prime_query_cache,
"prime_text_cache": prime_text_cache,
"count_words": count_words,
}
parser = argparse.ArgumentParser(description="Project Gutenberg tools")
parser.add_argument(
"subcommand",
choices=SUBCOMMANDS.keys(),
help="Which Gutenberg operation to perform",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="Disables progress indicators on stdout",
)
parser.add_argument(
"-l", "--language", default="en", help="Specifies the language of books to use"
)
parser.add_argument(
"-o",
"--output",
default="wordcounts.txt",
help="For count_words, specifies the location to output the results",
)
args = parser.parse_args()
# Call the correct subcommand
command = SUBCOMMANDS[args.subcommand]
command(args)
```
#### File: words/scripts/wordcounts_clean.py
```python
import argparse
def clean_word_counts(args):
"""Remove count from wordcounts file."""
with open(args.valid_words) as f:
valid_words = set(word.strip() for word in f)
with open(args.word_counts) as i, open(args.output, "w") as o:
# Each input line is assumed to be "WORD COUNT"; keep only the word for valid entries
o.writelines(
line.split()[0] + "\n" for line in i if line.split()[0] in valid_words
)
if __name__ == "__main__":
# Pull arguments from the command line
parser = argparse.ArgumentParser(description="Intersect two datasets of words.")
parser.add_argument(
"--word-counts", "-w", required=True, help="Input word counts",
)
parser.add_argument(
"--valid-words", "-v", required=True, help="Valid word list",
)
parser.add_argument(
"--output",
"-o",
default="intersection.txt",
help="Output file to write intersected word list to",
)
args = parser.parse_args()
# Process the input word lists, and write the intersected output
print("Processing input datasets...")
clean_word_counts(args)
# word_lists = read_word_lists(args.input, args.ignore)
# intersection = intersect_word_lists(word_lists)
# write_intersection(intersection, args.output)
print(f"Success! Your intersected word list is at `{args.output}`.")
```
#### File: corpora/twitter/test_twitter_tools.py
```python
import argparse
import collections
import os
import unittest
from tempfile import NamedTemporaryFile
from corpora.twitter import twitter_tools
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
INPUT_FIXTURE = os.path.join(DIR_PATH, "fixtures", "input.txt")
OUTPUT_FIXTURE = os.path.join(DIR_PATH, "fixtures", "output.txt")
class TestTwitterTools(unittest.TestCase):
"""Tests for the Twitter corpus tooling."""
def test_count_words_in_tweets_empty(self):
"""Test counting words in empty list of tweets."""
tweets = []
expected = collections.Counter()
self.assertEqual(twitter_tools._count_words_in_tweets(tweets), expected)
def test_count_words_in_tweets_basic(self):
"""Test counting words in simple list of tweets."""
tweets = [
"test tweet one",
"test tweet two",
"test tweet three",
]
expected = collections.Counter(
{"TEST": 3, "TWEET": 3, "ONE": 1, "TWO": 1, "THREE": 1}
)
self.assertEqual(twitter_tools._count_words_in_tweets(tweets), expected)
def test_count_words_in_tweets_punctuation(self):
"""Test counting words in tweets with weird characters and punctuation."""
tweets = [
"That's data. ", # punctuation is ignored
"UK weather data 11:00 PM 11.8°C 83 pct", # strange characters ignored
"ηταν περιεργα ρρ", # only handle a-z characters
]
expected = collections.Counter(
{
"": 5, # This is expected; it's more efficient to remove it later on
"THATS": 1,
"DATA": 2,
"UK": 1,
"WEATHER": 1,
"PM": 1,
"C": 1,
"PCT": 1,
}
)
self.assertEqual(twitter_tools._count_words_in_tweets(tweets), expected)
def test_count_words_for_basic_input(self):
"""Test that the tool counts the words in a short file."""
with NamedTemporaryFile(mode="w+t", delete=False) as i, NamedTemporaryFile(
mode="w+t"
) as o:
# Write some simple "tweets" into the input file
i.write("tweet foo\n")
i.write("tweet bar\n")
i.write("tweet bar\n")
i.close()
# Set up the args with our temp input/output files, and call count_words
args = argparse.Namespace(input=i.name, output=o.name, quiet=True)
twitter_tools.count_words(args)
# Check that the counts are correct
word_counts = o.read()
self.assertEqual(word_counts, "TWEET 3\nBAR 2\nFOO 1\n")
def test_count_words_with_realistic_input(self):
"""Test the tool counts the words in a reasonably large, real-looking file."""
with NamedTemporaryFile(mode="w+t") as o:
# Set up the args with our temp input/output files, and call count_words
args = argparse.Namespace(input=INPUT_FIXTURE, output=o.name, quiet=True)
twitter_tools.count_words(args)
# Check that the counts are correct
word_counts = o.read()
with open(OUTPUT_FIXTURE) as f:
self.assertEqual(word_counts, f.read())
```
|
{
"source": "jeffs/nested-watch",
"score": 2
}
|
#### File: nested-watch/src/test_factorial.py
```python
from factorial import factorial
def test_factorial():
assert 120 == factorial(5)
```
|
{
"source": "jeffspence/ldpop",
"score": 2
}
|
#### File: ldpop/ldpop/moran_augmented.py
```python
from __future__ import division
from __future__ import absolute_import
from builtins import map
from builtins import zip
from builtins import range
from builtins import object
from .compute_stationary import stationary1d_tridiagonal
from .compute_stationary import assertValidProbs
import logging
import time
import numpy
import scipy
import scipy.special
from scipy import sparse
# Have a separate class for the Rates
# so we don't have to pickle all of MoranStatesAugmented
# when doing multiprocessing
# (saves memory and communication time with worker processes)
class MoranRates(object):
def __init__(self, states):
self.exact = states.exact
self.numC = states.numC
self.n = states.n
self.unscaled_recom_rates = states.unscaled_recom_rates
self.unscaled_mut_rates = states.unscaled_mut_rates
self.unscaled_coal_rates = states.unscaled_coal_rates
def get_pi_c(self, popSize, theta, rho):
if not self.exact:
return numpy.array([0.0] * self.n + [1.0])
n = self.n
coalRate = 1. / popSize
recomRate = float(rho) / 2.
if rho == 0.0:
return numpy.array([0.0] * self.n + [1.0])
else:
numCoupledLinsRates = sparse.dok_matrix((n+1, n+1))
for i in range(n+1):
if i < n:
numCoupledLinsRates[i, i+1] = ((n-i)**2) * coalRate
numCoupledLinsRates[i, i] -= numCoupledLinsRates[i, i+1]
if i > 0:
numCoupledLinsRates[i, i-1] = recomRate * i
numCoupledLinsRates[i, i] -= numCoupledLinsRates[i, i-1]
return stationary1d_tridiagonal(numCoupledLinsRates)
def getRates(self, popSize, theta, rho):
start = time.time()
recomRate = float(rho) / 2.
mutRate = float(theta) / 2.
coalRate = 1. / float(popSize)
ret = (recomRate * self.unscaled_recom_rates
+ mutRate * self.unscaled_mut_rates
+ coalRate * self.unscaled_coal_rates)
end = time.time()
logging.info('%f seconds to construct rates for '
'rho=%f,theta=%f,N=%f' % (end-start, rho, theta, popSize))
return ret
# make all haplotypes
a_haps = []
b_haps = []
c_haps = []
for allele1 in range(2):
a_haps.append((allele1, -1))
b_haps.append((-1, allele1))
for allele2 in range(2):
c_haps.append((allele1, allele2))
all_haps = a_haps + b_haps + c_haps
def makeAllConfigs(hapList, n):
# make all configs
# represent a config as a dict
tmpConfigList = [{}]
for hapIdx, hap in enumerate(hapList):
newConfigList = []
for config in tmpConfigList:
numHaps = sum([v for k, v in config.items()])
assert numHaps <= n
if hapIdx == len(hapList)-1:
next_count = [n-numHaps]
else:
next_count = range(n - numHaps + 1)
for i in next_count:
newConfig = dict(config)
newConfig[hap] = i
newConfigList.append(newConfig)
tmpConfigList = newConfigList
return tmpConfigList
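# Illustrative behaviour of makeAllConfigs above (hypothetical small input): it enumerates
# every way to distribute n lineages over the given haplotypes, e.g.
#     makeAllConfigs(a_haps, 1)  ->  [{(0, -1): 0, (1, -1): 1}, {(0, -1): 1, (1, -1): 0}]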
def one_locus_probs(popSize, theta, n):
coalRate = 1. / popSize
mutRate = float(theta) / 2.
numOnesRates = sparse.dok_matrix((n+1, n+1))
for i in range(n+1):
if i < n:
numOnesRates[i, i+1] = (n-i) * mutRate + i * (n-i) / 2.0 * coalRate
numOnesRates[i, i] -= numOnesRates[i, i+1]
if i > 0:
numOnesRates[i, i-1] = i * mutRate + i * (n-i) / 2.0 * coalRate
numOnesRates[i, i] -= numOnesRates[i, i-1]
return stationary1d_tridiagonal(numOnesRates)
class AbstractMoranStates(object):
def __init__(self, n):
self.n = n
self._stationary = {}
def build_all_configs(self, n, exact):
'''
Create self.config_array, defined by:
self.config_array[i, a, b] = the count of haplotype
(a,b) in the i-th config
'''
if exact:
cList = list(range(n+1))
else:
cList = [n]
aConfigs = {n-c: makeAllConfigs(a_haps, n-c) for c in cList}
bConfigs = {n-c: makeAllConfigs(b_haps, n-c) for c in cList}
cConfigs = {c: makeAllConfigs(c_haps, c) for c in cList}
all_configs = []
for numC in cList:
for aConf in aConfigs[n - numC]:
for bConf in bConfigs[n - numC]:
for cConf in cConfigs[numC]:
conf = {}
conf.update(aConf)
conf.update(bConf)
conf.update(cConf)
all_configs.append(conf)
self.config_array = numpy.zeros((len(all_configs), 3, 3), dtype=int)
for idx, conf in enumerate(all_configs):
for (i, j), count in conf.items():
self.config_array[idx, i, j] = count
# create dictionary mapping their hash values back to their index
hash_vals = self.hash_config_array(self.config_array)
assert len(set(hash_vals)) == len(hash_vals) # should be all unique
self.hash_to_allIdx = {k: v for v, k in enumerate(hash_vals)}
def hash_config_array(self, conf_arr):
base = self.n+1
hash_vals = (conf_arr[:, 0, 0]
+ base * conf_arr[:, 0, 1]
+ (base**2) * (conf_arr[:, 1, 0]))
if self.exact:
hash_vals += ((base**3)*(conf_arr[:, 1, 1])
+ (base**4)*(conf_arr[:, 0, -1])
+ (base**5)*(conf_arr[:, -1, 0]))
return hash_vals
def numOnes(self, loc):
return self.folded_config_array.sum(axis=1+(1-loc))[:, 1]
def hapCount(self, hap):
return numpy.array(self.folded_config_array[:, hap[0], hap[1]])
def getUnlinkedStationary(self, popSize, theta):
one_loc_probs = one_locus_probs(popSize=popSize, theta=theta, n=self.n)
assertValidProbs(one_loc_probs)
n = self.n
leftOnes = self.numOnes(0)
rightOnes = self.numOnes(1)
bothOnes = self.hapCount((1, 1))
joint = one_loc_probs[leftOnes] * one_loc_probs[rightOnes]
if self.exact:
joint[self.numC > 0] = 0
else:
joint *= (scipy.special.comb(rightOnes, bothOnes)
* scipy.special.comb(n-rightOnes, leftOnes-bothOnes)
/ scipy.special.comb(n, leftOnes))
joint *= self.n_unfolded_versions
assertValidProbs(joint)
return joint
def build_symmetries(self):
start = time.time()
# the index of the folded version in all_configs
folded_list = get_folded_config_idxs(self)
# foldedIdx = the index in folded_configs
# allIdx = the index in all_configs
foldedIdx_to_allIdx = numpy.array(list(set(folded_list)))
allIdx_to_foldedIdx = {v: k for k, v in enumerate(foldedIdx_to_allIdx)}
allIdx_to_foldedIdx = [allIdx_to_foldedIdx[x] for x in folded_list]
self.hash_to_foldedIdx = {k: allIdx_to_foldedIdx[v]
for k, v in self.hash_to_allIdx.items()}
self.folded_config_array = self.config_array[foldedIdx_to_allIdx, :, :]
self.numC = (self.folded_config_array[:, 0, 0]
+ self.folded_config_array[:, 0, 1]
+ self.folded_config_array[:, 1, 0]
+ self.folded_config_array[:, 1, 1])
symm_mat = sparse.dok_matrix((len(allIdx_to_foldedIdx),
self.folded_config_array.shape[0]))
for i, j in enumerate(allIdx_to_foldedIdx):
symm_mat[i, j] = 1
symm_mat = symm_mat.tocsc()
antisymm_mat = symm_mat.transpose().tocsr(copy=True)
# normalize rows
self.n_unfolded_versions = numpy.array(antisymm_mat.sum(axis=1))[:, 0]
row_indices, col_indices = antisymm_mat.nonzero()
antisymm_mat.data /= self.n_unfolded_versions[row_indices]
self.symmetries = symm_mat.tocsr()
self.antisymmetries = antisymm_mat.tocsr()
logging.info('%f seconds to build symmetry matrices'
% (time.time() - start))
def ordered_log_likelihoods(self, liks):
try:
return {time: self.ordered_log_likelihoods(l)
for time, l in liks.items()}
except AttributeError:
liks = liks * self.antisymmetries
all_nC = self.config_array[:, :-1, :-1].sum(axis=(1, 2))
liks = liks[all_nC == self.n]
full_confs = self.config_array[:, :-1, :-1][all_nC == self.n, :, :]
liks = numpy.log(liks)
liks -= scipy.special.gammaln(self.n+1)
for i in (0, 1):
for j in (0, 1):
liks += scipy.special.gammaln(full_confs[:, i, j]+1)
full_confs = [tuple(sorted(((i, j), cnf[i, j])
for i in (0, 1) for j in (0, 1)))
for cnf in full_confs]
return dict(zip(full_confs, liks))
class MoranStatesAugmented(AbstractMoranStates):
'''
maintains a representation of the states(possible configs)
of the 2 locus Moran model
'''
def __init__(self, n):
'''
Constructor
'''
start = time.time()
super(MoranStatesAugmented, self).__init__(n)
self.exact = True
self.build_all_configs(n, exact=True)
end = time.time()
logging.info('Constructed exact states in %f seconds' % (end - start))
self.build_symmetries()
start = time.time()
self.unscaled_recom_rates = build_recom_rates(self)
logging.info('Constructed recombination rate matrix in %f seconds'
% (time.time() - start))
start = time.time()
self.unscaled_mut_rates = build_mut_rates(self)
logging.info('Constructed mut rate matrix in %f seconds'
% (time.time() - start))
start = time.time()
self.unscaled_coal_rates = (build_copy_rates(self)
+ build_cross_coal_rates(self))
logging.info('Constructed coalescent/copying rate matrix in %f seconds'
% (time.time() - start))
def get_folded_config_idxs(states):
arr = states.config_array
# move the missing allele in between alleles 0,1
arr = arr[:, (0, -1, 1), :][:, :, (0, -1, 1)]
# relabel alleles 0,1 (4 ways to do this)
symm_arrs = [arr, arr[:, ::-1, :], arr[:, :, ::-1], arr[:, ::-1, ::-1]]
# swap the 2 loci
symm_arrs += [numpy.transpose(a, axes=(0, 2, 1)) for a in symm_arrs]
# swap back allele 1 with missing allele
symm_arrs = [a[:, (0, -1, 1), :][:, :, (0, -1, 1)] for a in symm_arrs]
# get hash val for each (folded) config
hash_vals = numpy.vstack(list(map(states.hash_config_array, symm_arrs)))
# get the smallest hash val among all the folds
hash_vals = numpy.amin(hash_vals, axis=0)
assert len(hash_vals) == arr.shape[0]
# return the corresponding indices
return [states.hash_to_allIdx[h] for h in hash_vals]
def build_recom_rates(states):
assert states.exact
ret = sparse.csr_matrix(tuple([states.folded_config_array.shape[0]]*2))
confs = states.folded_config_array
for hap in c_haps:
rates = confs[:, hap[0], hap[1]]
otherConfs = numpy.array(confs)
otherConfs[:, hap[0], hap[1]] -= 1
otherConfs[:, hap[0], -1] += 1
otherConfs[:, -1, hap[1]] += 1
ret = ret + get_rates(states, otherConfs, rates)
return subtract_rowsum_on_diag(ret)
def build_mut_rates(states):
ret = sparse.csr_matrix(tuple([states.folded_config_array.shape[0]]*2))
confs = states.folded_config_array
if states.exact:
hapList = all_haps
else:
hapList = c_haps
for hap in hapList:
rates = confs[:, hap[0], hap[1]]
for loc in range(2):
if hap[loc] == -1:
continue
otherHap = [hap[0], hap[1]]
otherAllele = 1 - hap[loc]
otherHap[loc] = otherAllele
otherConfs = numpy.array(confs)
otherConfs[:, hap[0], hap[1]] -= 1
otherConfs[:, otherHap[0], otherHap[1]] += 1
ret = ret + get_rates(states, otherConfs, rates)
return subtract_rowsum_on_diag(ret)
def build_copy_rates(states):
ret = sparse.csr_matrix(tuple([states.folded_config_array.shape[0]]*2))
confs = states.folded_config_array
if states.exact:
hapList = all_haps
else:
hapList = c_haps
for hap in hapList:
for otherHap in hapList:
# check if we can copy
canCopy = True
for loc in range(2):
if hap[loc] == -1 and otherHap[loc] != -1:
canCopy = False
if not canCopy:
continue
copiedHap = [hap[0], hap[1]]
for loc in range(2):
if otherHap[loc] == -1:
copiedHap[loc] = -1
copiedHap = tuple(copiedHap)
hapMissing = (hap[0] == -1) + (hap[1] == -1)
otherMissing = (otherHap[0] == -1) + (otherHap[1] == -1)
assert otherMissing >= hapMissing
rates = (confs[:, hap[0], hap[1]]
* confs[:, otherHap[0], otherHap[1]] / 2.)
if otherMissing > hapMissing:
rates *= 2
otherConfs = numpy.array(confs)
otherConfs[:, otherHap[0], otherHap[1]] -= 1
otherConfs[:, copiedHap[0], copiedHap[1]] += 1
ret = ret + get_rates(states, otherConfs, rates)
return subtract_rowsum_on_diag(ret)
def subtract_rowsum_on_diag(spmat):
spmat = spmat.tocsr() - sparse.diags(numpy.array(spmat.sum(axis=1)).T,
offsets=[0],
format='csr')
return spmat.tocsr()
def build_cross_coal_rates(states):
assert states.exact
ret = sparse.csr_matrix(tuple([states.folded_config_array.shape[0]]*2))
confs = states.folded_config_array
for hap in c_haps:
otherConfs = numpy.array(confs)
rates = otherConfs[:, hap[0], -1] * otherConfs[:, -1, hap[1]]
otherConfs[:, hap[0], hap[1]] += 1
otherConfs[:, hap[0], -1] -= 1
otherConfs[:, -1, hap[1]] -= 1
ret = ret + get_rates(states, otherConfs, rates)
return subtract_rowsum_on_diag(ret)
def get_rates(states, otherConfs, rates):
otherConfs = otherConfs[rates != 0, :, :]
otherConfs = states.hash_config_array(otherConfs)
otherConfs = numpy.array([states.hash_to_foldedIdx[x] for x in otherConfs],
dtype=int)
confs = numpy.arange(states.folded_config_array.shape[0], dtype=int)
confs = confs[rates != 0]
rates = rates[rates != 0]
ret = sparse.coo_matrix((rates, (confs, otherConfs)),
shape=[states.folded_config_array.shape[0]]*2)
return ret.tocsr()
```
|
{
"source": "jeffspence/non_overlapping_mixtures",
"score": 2
}
|
#### File: non_overlapping_mixtures/code/ldpred.py
```python
import numpy as np
import scipy.stats
import sys
from time import time
def update_step_naive(beta_hat,
ld_mat,
vi_mu,
vi_s,
vi_psi,
sigma_sq_e,
sigma_sq_0,
sigma_sq_1,
p_0):
new_mu = np.copy(vi_mu)
new_s = np.copy(vi_s)
new_psi = np.copy(vi_psi)
for i in range(vi_mu.shape[0]):
new_mu[i] = (beta_hat[i]
- ld_mat[i, :].dot(new_mu)
+ new_mu[i] * ld_mat[i, i])
new_mu[i] /= (new_psi[i] * sigma_sq_e / sigma_sq_0
+ (1 - new_psi[i]) * sigma_sq_e / sigma_sq_1
+ ld_mat[i, i])
new_s[i] = 1 / (new_psi[i] / sigma_sq_0
+ (1 - new_psi[i]) / sigma_sq_1
+ ld_mat[i, i] / sigma_sq_e)
raw_psi_0 = p_0 * np.exp(
-0.5 * np.log(sigma_sq_0)
- 0.5 / sigma_sq_0 * (new_mu[i] ** 2 + new_s[i])
)
raw_psi_1 = (1 - p_0) * np.exp(
-0.5 * np.log(sigma_sq_1)
- 0.5 / sigma_sq_1 * (new_mu[i] ** 2 + new_s[i])
)
new_psi[i] = raw_psi_0 / (raw_psi_0 + raw_psi_1)
return new_mu, new_s, new_psi
def update_step_sparse(beta_hat,
ld_mat,
vi_mu,
vi_s,
vi_psi,
sigma_sq_e,
sigma_sq_1,
p_0):
new_mu = np.copy(vi_mu)
new_s = np.copy(vi_s)
new_psi = np.copy(vi_psi)
for i in range(vi_mu.shape[0]):
this_mu = (beta_hat[i]
- ld_mat[i, :].dot(new_mu * (1 - new_psi))
+ new_mu[i] * ld_mat[i, i] * (1 - new_psi[i]))
this_mu /= sigma_sq_e / sigma_sq_1 + ld_mat[i, i]
new_mu[i] = this_mu
new_s[i] = 1 / (1 / sigma_sq_1 + ld_mat[i, i] / sigma_sq_e)
psi_num = (p_0 / (1 - p_0)
* np.sqrt(1 + ld_mat[i, i] * sigma_sq_1 / sigma_sq_e)
* np.exp(-0.5 * (beta_hat[i]
- ld_mat[i, :].dot(new_mu * (1 - new_psi))
+ new_mu[i] * ld_mat[i, i]
* (1 - new_psi[i])) ** 2
/ (sigma_sq_e ** 2 / sigma_sq_1
+ sigma_sq_e * ld_mat[i, i])))
new_psi[i] = psi_num / (1 + psi_num)
return new_mu, new_s, new_psi
sigma_sq_1 = 1.0
sigma_sq_e = float(sys.argv[1])
num_reps = int(sys.argv[2])
p_zero = 0.99
num_sites = 1000
mse_mat = np.zeros((num_reps, 10))
cor_mat = np.zeros((num_reps, 10))
header = ['beta_hat', 'MLE', 'naive_1.0', 'naive_1e-2', 'naive_1e-4',
'naive_1e-10', 'nothing_1', 'nothing_2', 'nothing_3', 'sparse']
true_betas = np.zeros((num_reps, num_sites))
ld_mats = np.zeros((num_reps, num_sites, num_sites))
beta_hats = np.zeros((num_reps, num_sites))
for rep in range(num_reps):
print(rep)
true_beta = np.zeros(num_sites)
nonzero = np.random.choice([True, False], num_sites, p=[1-p_zero, p_zero])
true_beta[nonzero] = np.random.normal(loc=0,
scale=np.sqrt(sigma_sq_1),
size=nonzero.sum())
true_betas[rep] = true_beta
ld_matrix = (scipy.stats.wishart.rvs(num_sites, np.eye(num_sites))
/ num_sites)
ld_mats[rep] = ld_matrix
chol = np.linalg.cholesky(ld_matrix)
inv = np.linalg.inv(ld_matrix)
noise = chol.dot(np.random.normal(loc=0,
scale=np.sqrt(sigma_sq_e),
size=num_sites))
beta_hat = ld_matrix.dot(true_beta) + noise
beta_hats[rep] = beta_hat
cor_mat[rep, 0] = np.corrcoef(beta_hat, true_beta)[0, 1]
cor_mat[rep, 1] = np.corrcoef(inv.dot(beta_hat), true_beta)[0, 1]
mse_mat[rep, 0] = np.mean((beta_hat - true_beta)**2)
mse_mat[rep, 1] = np.mean((inv.dot(beta_hat) - true_beta)**2)
for idx, sigma_0 in enumerate([1.0, 1e-2, 1e-4, 1e-10]):
start_time = time()
vi_mu = np.zeros_like(beta_hat)
vi_s = (sigma_sq_1 + sigma_sq_e) * np.ones(num_sites)
vi_psi = np.ones(num_sites)
for i in range(100):
vi_mu, vi_s, vi_psi = update_step_naive(beta_hat,
ld_matrix,
vi_mu,
vi_s,
vi_psi,
sigma_sq_e,
sigma_0,
sigma_sq_1,
p_zero)
cor_mat[rep, idx + 2] = np.corrcoef(vi_mu, true_beta)[0, 1]
mse_mat[rep, idx + 2] = np.mean((vi_mu - true_beta)**2)
print('\tScheme took', time() - start_time)
vi_mu = np.zeros_like(beta_hat)
vi_s = (sigma_sq_1 + sigma_sq_e) * np.ones(num_sites)
vi_psi = p_zero * np.ones(num_sites)
start_time = time()
for i in range(100):
vi_mu, vi_s, vi_psi = update_step_sparse(beta_hat,
ld_matrix,
vi_mu,
vi_s,
vi_psi,
sigma_sq_e,
sigma_sq_1,
p_zero)
print('\tScheme took', time() - start_time)
cor_mat[rep, -1] = np.corrcoef(vi_mu * (1 - vi_psi), true_beta)[0, 1]
mse_mat[rep, -1] = np.mean((vi_mu * (1 - vi_psi) - true_beta)**2)
np.savetxt('../data/ldpred/cor_mat_' + str(sigma_sq_e) + '.txt', cor_mat,
header='\t'.join(header))
np.savetxt('../data/ldpred/mse_mat_' + str(sigma_sq_e) + '.txt', mse_mat,
header='\t'.join(header))
np.save('../data/ldpred/true_betas_' + str(sigma_sq_e) + '.npy', true_betas)
np.save('../data/ldpred/beta_hats_' + str(sigma_sq_e) + '.npy', beta_hats)
np.save('../data/ldpred/ld_mats_' + str(sigma_sq_e) + '.npy', ld_mats)
```
#### File: non_overlapping_mixtures/code/pyro_code_discrete.py
```python
import torch
import pyro
import pyro.infer
import pyro.optim
import pyro.distributions as dist
from torch.distributions import constraints
import numpy as np
import sys
from time import time
from pyro.poutine import block, replay, trace
from functools import partial
LR = 1e-3
NUM_PARTICLES = 2
TOTAL_ITS = 100000
GLOBAL_K = 50
N = 1000
p_causal = 0.01
GENETIC_MEAN = torch.tensor(np.zeros(N))
GENETIC_SD = torch.tensor(np.ones(N))
GENETIC_MIX = torch.tensor(np.log([1-p_causal, p_causal]))
sigma_sq_e = float(sys.argv[1])
def prs_model(beta_hat, obs_error):
z = pyro.sample(
'z',
dist.Independent(dist.Bernoulli(torch.tensor([p_causal]*N)), 1)
)
beta = pyro.sample(
'beta_latent',
dist.Independent(dist.Normal(GENETIC_MEAN,
GENETIC_SD), 1)
)
beta_hat = pyro.sample(
'beta_hat',
dist.MultivariateNormal(torch.mv(obs_error, beta*z),
covariance_matrix=obs_error*sigma_sq_e),
obs=beta_hat
)
return beta_hat
def prs_guide(index):
psi_causal = pyro.param(
'var_psi_causal_{}'.format(index),
torch.tensor(np.ones(N)*p_causal),
constraint=constraints.unit_interval
)
z = pyro.sample(
'z',
dist.Independent(dist.Bernoulli(psi_causal), 1)
)
means = pyro.param(
'var_mean_{}'.format(index),
torch.tensor(np.zeros(N))
)
scales = pyro.param(
'var_scale_{}'.format(index),
torch.tensor(np.ones(N)),
constraint=constraints.positive
)
beta_latent = pyro.sample(
'beta_latent',
dist.Independent(dist.Normal(means, scales), 1)
)
return z, beta_latent
def approximation(components, weights):
assignment = pyro.sample('assignment', dist.Categorical(weights))
results = components[assignment]()
return results
def relbo(model, guide, *args, **kwargs):
approximation = kwargs.pop('approximation')
traced_guide = trace(guide)
elbo = pyro.infer.Trace_ELBO(num_particles=NUM_PARTICLES)
loss_fn = elbo.differentiable_loss(model, traced_guide, *args, **kwargs)
guide_trace = traced_guide.trace
replayed_approximation = trace(replay(block(approximation,
expose=['beta_latent', 'z']),
guide_trace))
approximation_trace = replayed_approximation.get_trace(*args, **kwargs)
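    # Residual ELBO: the standard ELBO minus the log-density of the current
    # mixture approximation, so each new component is pulled toward posterior
    # mass the mixture does not yet cover; the final negation turns it back
    # into a loss to minimize.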
relbo = -loss_fn - approximation_trace.log_prob_sum()
return -relbo
def run_svi(beta_hat, obs_error, K, true_beta):
num_steps = TOTAL_ITS//K
start = time()
pyro.clear_param_store()
pyro.enable_validation(True)
def my_model():
return prs_model(torch.tensor(beta_hat),
torch.tensor(obs_error))
initial_approximation = partial(prs_guide, index=0)
components = [initial_approximation]
weights = torch.tensor([1.])
wrapped_approximation = partial(approximation,
components=components,
weights=weights)
optimizer = pyro.optim.Adam({'lr': LR})
losses = []
wrapped_guide = partial(prs_guide, index=0)
svi = pyro.infer.SVI(
my_model,
wrapped_guide,
optimizer,
loss=pyro.infer.Trace_ELBO(num_particles=NUM_PARTICLES)
)
for step in range(num_steps):
loss = svi.step()
losses.append(loss)
if step % 100 == 0:
print('\t', step, np.mean(losses[-100:]))
if step % 100 == 0:
pstore = pyro.get_param_store()
curr_mean = pstore.get_param(
'var_mean_{}'.format(0)).detach().numpy()
curr_psis = pstore.get_param(
'var_psi_causal_{}'.format(0)).detach().numpy()
curr_mean = curr_mean * curr_psis
print('\t\t', np.corrcoef(true_beta, curr_mean)[0, 1],
np.mean((true_beta - curr_mean)**2))
pstore = pyro.get_param_store()
for t in range(1, K):
print('Boost level', t)
wrapped_guide = partial(prs_guide, index=t)
losses = []
optimizer = pyro.optim.Adam({'lr': LR})
svi = pyro.infer.SVI(my_model, wrapped_guide, optimizer, loss=relbo)
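        # The new component gets a Frank-Wolfe style weight 2/(t+3); existing
        # weights are rescaled so the mixture weights stay normalized.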
new_weight = 2 / ((t+1) + 2)
new_weights = torch.cat((weights * (1-new_weight),
torch.tensor([new_weight])))
for step in range(num_steps):
loss = svi.step(approximation=wrapped_approximation)
losses.append(loss)
if step % 100 == 0:
print('\t', step, np.mean(losses[-100:]))
if step % 100 == 0:
pstore = pyro.get_param_store()
curr_means = [
pstore.get_param(
'var_mean_{}'.format(s)).detach().numpy()
for s in range(t+1)
]
curr_psis = [
pstore.get_param(
                        'var_psi_causal_{}'.format(s)).detach().numpy()
for s in range(t+1)
]
curr_means = np.array(curr_means) * np.array(curr_psis)
curr_mean = new_weights.detach().numpy().dot(curr_means)
print('\t\t', np.corrcoef(true_beta, curr_mean)[0, 1],
np.mean((true_beta - curr_mean)**2))
components.append(wrapped_guide)
weights = new_weights
wrapped_approximation = partial(approximation,
components=components,
weights=weights)
# scales.append(
# pstore.get_param('var_mean_{}'.format(t)).detach().numpy()
# )
print('BBBVI ran in', time() - start)
pstore = pyro.get_param_store()
curr_means = [
pstore.get_param(
'var_mean_{}'.format(s)).detach().numpy()
for s in range(K)
]
    return weights.detach().numpy().dot(np.array(curr_means))
if __name__ == '__main__':
beta_hats = np.load('../data/ldpred/beta_hats_' + str(sigma_sq_e) + '.npy')
cov_mats = np.load('../data/ldpred/ld_mats_' + str(sigma_sq_e) + '.npy')
true_betas = np.load('../data/ldpred/true_betas_'
+ str(sigma_sq_e) + '.npy')
cors = []
mse = []
for i in [int(sys.argv[2])-1]:
print((np.abs(true_betas[i, 0:N]) > 1e-10).sum(), 'num nonzero')
print(np.mean(true_betas[i, 0:N]**2), 'null MSE')
post_mean = run_svi(beta_hats[i][0:N],
cov_mats[i][0:N, 0:N],
GLOBAL_K,
true_betas[i, 0:N])
cors.append(np.corrcoef(post_mean, true_betas[i, 0:N])[0, 1])
mse.append(np.mean((post_mean - true_betas[i, 0:N])**2))
np.savetxt('../data/ldpred/pyro_discrete_' + str(sigma_sq_e)
+ '_rep_' + sys.argv[2] + '_cor.txt', cors)
np.savetxt('../data/ldpred/pyro_discrete_' + str(sigma_sq_e)
+ '_rep_' + sys.argv[2] + '_mse.txt', mse)
```
|
{
"source": "JeffSpies/nonwordlist",
"score": 3
}
|
#### File: JeffSpies/nonwordlist/main.py
```python
from process import wordlist
import itertools
import time
import os
import re
ALPHABET = '23456789abcdefghjkmnpqrstuvwxyz'
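# GUID alphabet deliberately omits easily confused characters (0, 1, i, l, o).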
def main():
blacklist_all = wordlist.dict_by_length()
blacklist = blacklist_all[3].union(blacklist_all[4]).union(blacklist_all[5])
combinations = get_combinations(3)
tick = time.time()
bad_guids = generate_guids(blacklist, combinations=combinations)
print('Time: {}, Length: {}'.format(time.time()-tick, len(bad_guids)))
    bad_guids = bad_guids.union(generate_69s(combinations))
with open('guid_blacklist.txt', 'w') as writer:
for item in bad_guids:
writer.write(item + os.linesep)
def get_combinations(length, alphabet=ALPHABET):
combinations = {}
for x in range(length):
combinations[x + 1] = list(itertools.product(alphabet, repeat=(x+1)))
return combinations
def generate_guids(words, combinations=None, length=5, alphabet=ALPHABET):
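    # For every blacklisted word, embed it at each possible offset within a
    # length-5 GUID and fill the remaining slots with every combination of
    # alphabet characters, collecting all resulting GUIDs.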
guids = set()
if not combinations:
combinations = get_combinations(2, alphabet)
n = 0
for word in words:
if n % 1000 == 0:
print(str(n))
if len(word) > length:
raise Exception
if len(word) == length:
guids.add(word)
else:
positions = n_positions(word, length)
n_random = length - len(word)
for c in combinations[n_random]:
for i in range(0, positions):
word_list = create_word_list(word, i)
available_indices = [i for i, x in enumerate(word_list) if not x]
                    for index, idx in enumerate(available_indices):
                        word_list[idx] = c[index]
result = ''.join(word_list)
guids.add(result)
n += 1
return guids
def generate_69s(combinations):
found = []
guids_with_69 = generate_guids(['69'], combinations=combinations)
for word in guids_with_69:
if re.search('[a-z]69[a-z]', word) or \
re.search('^69[a-z]', word) or \
re.search('[a-z]69$', word):
found.append(word)
return found
def create_word_list(word, index):
word_list = [None] * 5
for letter in word:
word_list[index] = letter
index += 1
return word_list
def n_positions(word, length):
return length - len(word) + 1
if __name__ == '__main__':
main()
```
#### File: JeffSpies/nonwordlist/transformations.py
```python
import itertools
import re
def vowel_expand(word, max_size, vowels=['a','e','i','o','u']):
results = []
found_list = list(re.finditer('['+''.join(vowels)+']', word))
for combination in itertools.product(range(0,2), repeat=len(found_list)):
if sum(combination) == 1:
for i, do_process in enumerate(combination):
if do_process:
position = found_list[i].span()[0]
character = found_list[i].group()
repeat_number = max_size-len(word)
for n in range(0, (repeat_number + 1)):
word_list = list(word)
word_list.insert(position, character*n)
result = ''.join(word_list)
results.append(result)
return results
def drop_vowel(word, vowels='aeiou', minimum=3):
result = []
found_list = get_matched_letters_indices(word, vowels)
# generate all possible combinations of vowel locations
positions = list(itertools.product(range(0, 2), repeat=len(found_list)))
for item in positions:
word_list = list(word)
for idx, value in enumerate(item):
if value == 1:
word_list[found_list[idx][0]] = ''
final = ''.join(word_list)
if len(final) >= minimum:
result.append(final)
return result
def l33t(word):
result = []
substitutions = {
'e': '3',
'a': '4',
's': '5',
't': '7'
}
found_list = get_matched_letters_indices(word, ''.join(substitutions.keys()))
positions = list(itertools.product(range(0, 2), repeat=len(found_list)))
for item in positions:
word_list = list(word)
for idx, value in enumerate(item):
if value == 1:
word_list[found_list[idx][0]] = substitutions[found_list[idx][1]]
result.append(''.join(word_list))
return result
def words_with_ck(word):
result = []
if 'ck' in word:
result.append(word)
substitutions = ['c', 'cc', 'k', 'kk', 'x', 'xx']
for s in substitutions:
new_word = word.replace('ck', s)
result.append(new_word)
return result
def repeat_to_single(word):
word_list = list(word)
repeats = []
result = []
for letter in word_list:
if word_list.count(letter) > 1 and letter not in repeats:
repeats.append(letter)
found_indices = []
for letter in repeats:
repeat_indices = []
for l in range(0, len(word_list)):
if word_list[l] == letter:
repeat_indices.append(l)
found_indices.append(repeat_indices)
positions = list(itertools.product(range(0, 2), repeat=len(found_indices)))
for item in positions:
word_list = list(word)
for idx, value in enumerate(item):
if value == 1:
repeat_set = found_indices[idx]
for r in repeat_set[1:]:
word_list[r] = ''
result.append(''.join(word_list))
else:
result.append(''.join(word_list))
return result
def get_matched_letters_indices(word, letters):
found_list = []
for found in re.finditer('[' + letters + ']', word):
match = (found.span()[0], found.group())
found_list.append(match)
return found_list
def drop_suffixes(word):
result = []
suffixes = ['ed', 'es', 's', 'y', 'ing']
for s in suffixes:
if word.endswith(s):
result.append(word[0:len(word)-len(s)])
return result
```
#### File: JeffSpies/nonwordlist/wordlist.py
```python
from collections import defaultdict
import os
import re
import unicodedata
class WordList(object):
def __init__(self, lower=False, strip_nonalpha=False, echo=True, min=None, max=None, transforms=[]):
self._lower = lower
self._echo = echo
self._strip_nonalpha = strip_nonalpha
self._words = set()
        self.sets = defaultdict(set)
self.min = min
self.max = max
self.transforms = transforms
def _transform(self, word, fns, min=None, max=None):
if not isinstance(fns, list):
fns = [fns]
results = [word]
for fn in fns:
results += fn(word)
print(results)
return self._add_words(results, min=min, max=max)
def _add_word(self, word, min=None, max=None):
word_length = len(word)
min = min if min else self.min
max = max if max else self.max
if min and word_length < min:
return 0
if max and word_length > max:
return 0
if word not in self._words:
self._words.add(word)
return 1
return 0
def _add_words(self, words, min=None, max=None):
count_added = 0
for word in words:
count_added += self._add_word(word, min=min, max=max)
return count_added
@property
def words(self):
return self._words
def add_file(self, filename, split_further=None, min=None, max=None, reject=[], transforms=[]):
count_possible = 0
count_transformed = 0
count_added = 0
        with open(filename, 'r', encoding='iso-8859-15') as f:  # can also try cp437 (so:16528468)
for row in f:
if split_further is None:
words = [row]
else:
words = row.split(split_further)
for word in words:
word = word.strip('\n').strip('\r')
if self._lower:
word = word.lower()
word = unicodedata.normalize('NFKD', word).encode('ascii','ignore').decode("utf-8")
if self._strip_nonalpha:
word = re.sub('[^a-zA-Z]', '', word)
do_continue = True
for fn in reject:
if fn(word):
do_continue = False
if not do_continue:
break
number_words_transformed = 0
number_words_added = self._add_word(word, min=min, max=max)
if transforms and number_words_added > 0:
number_words_transformed = self._transform(word, transforms, min=min, max=max)
count_possible += 1
count_transformed += number_words_transformed
count_added += number_words_added
if self._echo:
print('Dictionary: {}, Possible: {}, Words added: {}, Transformed added: {}, Total: {}'.format(
os.path.basename(filename), count_possible, count_added, count_transformed, len(self._words)))
def dict_by_length(self):
out = defaultdict(set)
for word in self._words:
out[len(word)].add(word)
return out
```
|
{
"source": "jeffsp/kaggle_denoising",
"score": 3
}
|
#### File: kaggle_denoising/jsp/denoise.py
```python
from __future__ import print_function
import cv2
import utils
def copy_it(img):
"""
Stupid denoise function
"""
return img
def all_white(img):
"""
Stupider denoise function
"""
img[0:] = 255
return img
def nlmeans(img):
"""
Non-local means denoising
"""
return cv2.fastNlMeansDenoising(img)
def main():
fns1 = utils.noisy_fns()
fns2 = utils.denoised_fns()
for fn1, fn2 in zip(fns1, fns2):
print(fn1, '->', fn2)
# Get noisy image
img1 = cv2.imread(fn1)
# Denoise
# img2 = all_white(img1)
img2 = copy_it(img1)
# img2 = nlmeans(img1)
# Write it out
cv2.imwrite(fn2, img2)
if __name__ == '__main__':
main()
```
#### File: kaggle_denoising/jsp/measure.py
```python
from __future__ import print_function
import sys
import glob
import cv2
import numpy
import math
import utils
def main():
fns1 = utils.ground_truth_fns()
fns2 = utils.denoised_fns()
sse = 0.0
total = 0
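    # Accumulate squared error over all pixels (normalized to [0, 1]) across
    # every image pair, then report the overall RMSE.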
for (gt, pred) in utils.gen_images(fns1, fns2):
err = cv2.absdiff(gt, pred)
sse += numpy.sum((err.astype("float") / 255) ** 2)
total += pred.size
print(math.sqrt(sse / total))
if __name__ == '__main__':
main()
```
#### File: kaggle_denoising/jsp/thresholding.py
```python
import numpy as np
from PIL import Image
def discretize(a):
return np.uint8((a > 50)*255)
image_id = 101
dirty_image_path = "../input/train/%d.png" % image_id
clean_image_path = "../input/train_cleaned/%d.png" % image_id
dirty = Image.open(dirty_image_path)
clean = Image.open(clean_image_path)
dirty.save("dirty.png")
clean.save("clean.png")
clean_array = np.asarray(clean)
dirty_array = np.asarray(dirty)
discretized_array = discretize(dirty_array)
Image.fromarray(discretized_array).save("discretized.png")
html = """<html>
<body>
<h1>Thresholding</h1>
<p>This is a very simple attempt to clean up an image by thresholding the pixel value at 50. (Under 50 goes to 0, above 50 goes to 255.)</p>
<h2>Dirty image</h2>
<img src="dirty.png">
<h2>Cleaned up by thresholding</h2>
<img src="discretized.png">
<h2>Original clean image</h2>
<img src="clean.png">
</body>
</html>
"""
with open("output.html", "w") as output_file:
output_file.write(html)
```
#### File: jeffsp/kaggle_denoising/utils.py
```python
from __future__ import print_function
import cv2
import glob
import sys
import itertools
import numpy as np
def ground_truth_fns():
"""
Return list of ground truth filenames
"""
return sorted(glob.glob('input_cleaned/*.png'))
def noisy_fns():
"""
Return list of noisy filenames
"""
return sorted(glob.glob('input/*.png'))
def test_fns():
"""
Return list of the test filenames -- the ones for submission
"""
return sorted(glob.glob('test/*.png'))
def test_denoised_fns():
"""
Return list of the densoied test filenames.
"""
return sorted(glob.glob('test_denoised/*.png'))
def denoised_fns():
"""
Return list of denoised filenames
Don't glob the filenames, instead generate them from
the ground truth filenames.
"""
fns = ground_truth_fns()
fns = [fn.replace('input_cleaned', 'input_denoised') for fn in fns]
return sorted(fns)
def gen_images(fns1, fns2):
"""
Grayscale image generator
"""
assert len(fns1) == len(fns2)
for fn1, fn2 in zip(fns1, fns2):
print(fn1, fn2)
sys.stdout.flush()
# Read files
img1 = cv2.imread(fn1)
img2 = cv2.imread(fn2)
assert img2.shape == img1.shape
# Convert to grayscale
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
assert img2.shape == img1.shape
yield(img1, img2)
def get_neighbors(noisy, sz):
"""
Get sz by sz neighbors and for each pixel value
Note: If grayscale, this will be sz x sz, if RGB, will be sz x sz x 3
Use grayscale
"""
window = (sz, sz)
neighbors = [cv2.getRectSubPix(noisy, window, (y, x)).ravel() \
for x, y in itertools.product(range(noisy.shape[0]), range(noisy.shape[1]))]
neighbors = np.asarray(neighbors)
return (neighbors / 255.0).astype('float32')
def get_ground_truth(ground_truth):
"""
Return the ground truth pixel values
"""
return (ground_truth / 255.0).astype('float32').ravel()
def grayscale_to_csv(filename, img):
"""
Convert a png file to a CSV file
"""
csv = ''
for j in range(img.shape[1]):
for i in range(img.shape[0]):
value = img[i, j]
assert value[0] == value[1]
assert value[0] == value[2]
csv += '%s_%s_%s,%s\n' % (filename, i+1, j+1, value[0] / 255.0)
return csv
```
|
{
"source": "jeffs/py-kart",
"score": 4
}
|
#### File: py-kart/pangram/main.py
```python
from dataclasses import dataclass
import argparse
import sys
from typing import Callable, Set
DEFAULT_MIN_LENGTH = 4
DEFAULT_WORDS_FILE = "/usr/share/dict/words"
@dataclass
class Command:
mandatory_letters: Set[str]
available_letters: Set[str]
min_length: int
words_file: str
def parse_args() -> Command:
parser = argparse.ArgumentParser(description="Find Spelling Bee answers.")
parser.add_argument(
"-m",
"--min-length",
default=DEFAULT_MIN_LENGTH,
help="omit words shorter than N characters",
metavar="N",
type=int,
)
parser.add_argument(
"letters",
        help="available letters, capitalized if mandatory",
type=str,
)
parser.add_argument(
"words_file",
default=DEFAULT_WORDS_FILE,
help="available words, one per line",
metavar="words-file",
nargs="?",
type=str,
)
args = parser.parse_args()
return Command(
mandatory_letters=set(c.lower() for c in args.letters if c.isupper()),
available_letters=set(args.letters.lower()),
min_length=args.min_length,
words_file=args.words_file,
)
def make_validator(command: Command) -> Callable[[str], bool]:
def is_valid_char(c: str) -> bool:
return c in command.available_letters or not c.isalpha()
def is_valid_word(word: str) -> bool:
return (
len(word) >= command.min_length
and all(c in word for c in command.mandatory_letters)
and all(map(is_valid_char, word))
)
return is_valid_word
def main() -> None:
command = parse_args()
with open(command.words_file) as lines:
words = tuple(line.strip() for line in lines)
is_valid_word = make_validator(command)
valid_words = sorted(filter(is_valid_word, words), key=len)
for word in valid_words:
is_pangram = all(c in word for c in command.available_letters)
prefix = " *" if is_pangram else " "
print(prefix, word)
if __name__ == "__main__":
main()
```
#### File: py-kart/vimod/main.py
```python
import subprocess as _subprocess, sys as _sys
def main():
status = _subprocess.run(
("git", "status"), capture_output=True, check=True, encoding="utf-8"
).stdout
files = tuple(
line.split(maxsplit=1)[1]
for line in status.splitlines()
if line.startswith("\tmodified:")
)
if files:
_sys.exit(_subprocess.run(("vim", *files)).returncode)
print("no modified files", file=_sys.stderr)
if __name__ == "__main__":
main()
```
#### File: py-kart/words/main.py
```python
import re
import sys
from typing import Iterable
# We care only about links that include whitespace, thus affecting word count.
# Such links are generally either inline: [text]( http://... )
# Or on separate lines, like footnotes: [tag]: http://...
LINK_RE = re.compile(r'(?:]\([^)]*)|(?:^\[.*]:.*)')
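# Illustrative matches: the ']( http://...' portion of an inline link such as
# '[text]( http://example.com )', and whole footnote lines like
# '[1]: http://example.com'.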
def is_word(s: str) -> bool:
"""Return true if s contains any alphanumeric characters."""
return any(c.isalnum() for c in s)
def count_words(line: str) -> int:
"""Return the number of words in the specified line. """
return sum(1 for s in line.split() if is_word(s))
def count_markdown_words(lines: Iterable[str]) -> int:
"""
Return the total number of words on all of the specified lines, excluding
(most) Markdown links.
"""
# Python's re.sub method, unlike equivalent functions in other mainstream
# languages, and unlike Python's own str.replace, expects the replacement
# text before the subject text. 🤦 The type system can't catch incorrect
# ordering, even at runtime, because both parameters are strings.
return sum(count_words(LINK_RE.sub('', line)) for line in lines)
def main():
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
with open(arg) as lines:
total = count_markdown_words(lines)
print('{:8} {}'.format(total, arg))
else:
print(count_markdown_words(sys.stdin))
if __name__ == '__main__':
main()
```
|
{
"source": "jeffsrobertson/deepsign",
"score": 3
}
|
#### File: deepsign/training/augmentation.py
```python
import numpy as np
from scipy.ndimage import zoom, rotate
from scipy.ndimage.filters import gaussian_filter
def random_crop(vid_array, min_scale=.7, max_scale=1.3):
"""
Scales the video frames by some randomly generated value between min_scale and max_scale.
All frames are scaled by the same scale factor.
After scaling, randomly picks bounds of new frame, so some translation of the image will occur.
Input:
vid_array: (ndarray) 4d array of shape (3, frames, height, width)
min_scale: (float) Minimum allowed scale factor
max_scale: (float) Maximum allowed scale factor
Output:
scale_factor: (float) Scale factor used for this video
        new_vid_array: (ndarray) 4d array of scaled video, same shape as the input array
"""
scale_factor = np.random.uniform(low=min_scale, high=max_scale)
num_colors, num_frames, old_rows, old_cols = vid_array.shape
new_rows, new_cols = zoom(vid_array[0, 0, :, :], scale_factor).shape
# If randomly-generated scale is ~1, just return original array
if new_rows == old_rows:
return scale_factor, vid_array
if scale_factor > 1:
new_x1 = np.random.randint(0, new_cols - old_cols)
new_x2 = new_x1 + old_cols
new_y1 = np.random.randint(0, new_rows - old_rows)
new_y2 = new_y1 + old_rows
else:
new_x1 = np.random.randint(0, old_cols - new_cols)
new_x2 = new_x1 + new_cols
new_y1 = np.random.randint(0, old_rows - new_rows)
new_y2 = new_y1 + new_rows
new_vid_array = np.zeros_like(vid_array)
for f in range(num_frames):
new_frame = []
for c in range(3):
new_frame.append(zoom(vid_array[c, f, :, :], scale_factor))
new_frame = np.array(new_frame)
if scale_factor > 1:
new_vid_array[:, f, :, :] = new_frame[:, new_y1:new_y2, new_x1:new_x2]
if scale_factor < 1:
new_vid_array[:, f, new_y1:new_y2, new_x1:new_x2] = new_frame
new_vid_array[new_vid_array > 255] = 255
new_vid_array[new_vid_array < 0] = 0
return scale_factor, new_vid_array
def random_horizontal_flip(vid_array, flip_chance=.5):
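    # Flip the video horizontally with probability flip_chance; all frames and
    # channels are flipped together. Returns whether a flip occurred and the
    # (possibly flipped) array.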
rng = np.random.random()
flipped = False
    if rng < flip_chance:
vid_array = np.flip(vid_array, axis=-1)
flipped = True
vid_array[vid_array > 255] = 255
vid_array[vid_array < 0] = 0
return flipped, vid_array
def random_rotate(vid_array, min_degrees=-8, max_degrees=8):
"""
    Rotates the video frames by some randomly generated value between min_degrees and max_degrees.
All frames are rotated by the same degree.
Input:
vid_array: (ndarray) 4d array of shape (3, frames, height, width)
min_degrees: (float) minimum allowed degree to rotate
max_degrees: (float) maximum allowed degree to rotate
Output:
degree: (float) degree used to rotate this video
new_vid_array: (ndarray) 4d array of rotated video, same shape as input array
"""
degree = np.random.uniform(low=min_degrees, high=max_degrees)
new_vid_array = rotate(vid_array, degree, reshape=False, axes=(2, 3))
new_vid_array[new_vid_array > 255] = 255
new_vid_array[new_vid_array < 0] = 0
return degree, new_vid_array
def random_multiply_intensity(vid_array, min_scale=.9, max_scale=1.1):
"""
Uniformly multiplies the video intensity by a randomly chosen value between min_scale and max_scale.
    Pixel values are automatically capped at 255.
Input:
vid_array: (ndarray) 4d array of shape (3, frames, height, width)
max_scale: (float) maximum allowed multiplicative factor for image intensity.
min_scale: (float) minimum allowed multiplicative factor for image intensity.
Output:
scale_factor: (float) scale factor used in generating new video.
new_vid_array: (ndarray) 4d array, same shape as input array
"""
if min_scale < 0:
        raise ValueError("min_scale parameter for random_multiply_intensity() must be greater than 0.")
if min_scale > max_scale:
raise ValueError("max_scale must be greater than min_scale in multiply_intensity()")
scale_factor = np.random.uniform(min_scale, max_scale)
new_vid_array = scale_factor*vid_array
new_vid_array[new_vid_array > 255] = 255
new_vid_array[new_vid_array < 0] = 0
return scale_factor, new_vid_array
def random_add_intensity(vid_array, min_add=-.3, max_add=.3):
"""
Uniformly adds a value to all pixel intensities.
Additive value is randomly selected to be between min_add*np.max(vid_array) and max_add*np.max(vid_array)
    Pixel values are automatically capped to be between 0 and 255.
Input:
vid_array: (ndarray) 4d array of shape (color, frames, height, width)
max_add: (float) maximum allowed additive factor for image intensity.
min_scale: (float) minimum allowed additive factor for image intensity.
Output:
add_factor: (float) Additive factor used in generating new video.
new_vid_array: (ndarray) 4d array of modified video, same shape as input array.
"""
if min_add > max_add:
raise ValueError("max_add must be greater than min_add in random_add_intensity()")
add_factor = np.random.uniform(min_add, max_add)
new_vid_array = vid_array + add_factor*np.max(vid_array)
new_vid_array[new_vid_array > 255] = 255
new_vid_array[new_vid_array < 0] = 0
return add_factor, new_vid_array
def random_blur(vid_array, min_sigma=0, max_sigma=.01):
"""
Applies a gaussian blur to the image.
    Standard deviation of blur is randomly chosen between min_sigma and max_sigma.
All frames/color channels are blurred by the same amount.
Input:
vid_array: (ndarray) 4d array of shape (color, frames, height, width)
max_sigma: (float) maximum allowed stdev of gaussian.
min_sigma: (float) minimum allowed stdev of gaussian.
Output:
        sigma: (float) standard deviation of the gaussian blur that was applied.
        blurred_vid: (ndarray) 4d array of blurred video, same shape as input array.
"""
num_colors, num_frames, num_rows, num_cols = vid_array.shape
sigma_factor = np.random.uniform(min_sigma, max_sigma)
sigma = num_rows*sigma_factor
blurred_vid = np.zeros_like(vid_array)
for f in range(num_frames):
for c in range(num_colors):
blurred_vid[c, f, :, :] = gaussian_filter(vid_array[c, f, :, :], sigma)
blurred_vid[blurred_vid > 255] = 255
blurred_vid[blurred_vid < 0] = 0
return sigma, blurred_vid
```
|
{
"source": "jeffssss/VideoPrizeAutomation-iOS",
"score": 3
}
|
#### File: VideoPrizeAutomation-iOS/src/auto.py
```python
import time
import os
# iOS-related packages
import wda
import sys
reload(sys)
sys.setdefaultencoding('utf8')
c = wda.Client()
s = c.session()
estimatedCount = 50
def game_fun():
count = 1
while True:
time.sleep(5)
print('--------------------will trigger video, count=' + str(count))
s.tap(180,420)
print('sleep begin')
time.sleep(35)
print('sleep end, will close video')
s.tap(330,40)
print('finish once')
count = count + 1
if count > estimatedCount:
break
if __name__ == '__main__':
game_fun()
```
|
{
"source": "jeff-stacey/horizon_detection",
"score": 3
}
|
#### File: prototypes/vsearch/vsearch.py
```python
import imageio
import numpy as np
import struct
import math
import os
lines_searched = 0
def ray_from_centre(image, angle):
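    # Cast a ray from the image centre at the given angle and bisect along it
    # for the lit/unlit transition (the horizon edge). Returns (found, signed
    # distance); the distance is negative when the image centre lies on the
    # Earth side of the horizon.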
global lines_searched
lines_searched += 1
image_height = len(image)
image_width = len(image[0])
centre_y = image_height / 2
centre_x = image_width / 2
rise = math.sin(angle)
run = math.cos(angle)
t_min = 0
t_max = min(abs(0.5*(image_height - 1)/rise), abs(0.5*(image_width - 1)/run))
def pixel_along_line_in_hrz(t):
x = centre_x + run * t
y = centre_y + rise * t
# centre of pixel is (.5, .5)
x_index = round(x - 0.5)
y_index = round(image_height - (y + 0.5))
return image[y_index][x_index] > 0
min_in_hrz = pixel_along_line_in_hrz(t_min)
max_in_hrz = pixel_along_line_in_hrz(t_max)
if min_in_hrz == max_in_hrz:
# This means there is no edge along the line
return False, 0
epsilon = 0.5
while abs(t_min - t_max) > epsilon:
t_mid = 0.5 * (t_min + t_max)
if pixel_along_line_in_hrz(t_mid) == min_in_hrz:
# This means hrz is between mid and max
t_min = t_mid
else:
# This means hrz is between min and mid
t_max = t_mid
if min_in_hrz:
# This means the camera is pointing below the horizon
return True, -0.5 * (t_min + t_max)
else:
# This means the camera is pointing above the horizon
return True, 0.5 * (t_min + t_max)
def main():
image_folder = '../test_images/images'
data_folder = '../test_images/metadata'
images = np.sort(os.listdir(image_folder))
metadata = np.sort(os.listdir(data_folder))
test_results = []
for im, da in zip(images, metadata):
print("processing file " + im)
image = imageio.imread(image_folder + "/" + im)
# Find the vertex with the bisection method.
# To do this, an angle pointing at the horizon on either side of the vertex is needed.
test_angle = 0
found_hz = True
while found_hz:
test_angle += 0.05
found_hz, d = ray_from_centre(image, test_angle)
while not found_hz:
test_angle += 0.05
found_hz, d = ray_from_centre(image, test_angle)
above_hz = (d >= 0)
hz_start_angle = test_angle
while found_hz:
test_angle += 0.05
found_hz, d = ray_from_centre(image, test_angle)
hz_end_angle = test_angle - 0.05
if hz_end_angle >= math.pi:
hz_start_angle -= 2 * math.pi
hz_end_angle -= 2 * math.pi
found_hz, dstart = ray_from_centre(image, hz_start_angle)
assert(found_hz)
found_hz, dend = ray_from_centre(image, hz_end_angle)
assert(found_hz)
# Take the smallest d, so that we can find a symmetric point on the edge
if abs(dstart) > abs(dend):
target_d = dend
base_angle = hz_end_angle
a2 = hz_start_angle
d2 = dstart
else:
target_d = dstart
base_angle = hz_start_angle
a2 = hz_end_angle
d2 = dend
a1 = 0.5 * (base_angle + a2)
found_hz, d1 = ray_from_centre(image, a1)
# TODO: These aren't true in some edge cases
assert(found_hz)
assert(abs(d1) < abs(target_d))
epsilon = 0.01
while abs(a1 - a2) > epsilon:
test_angle = 0.5 * (a1 + a2)
found_hz, test_d = ray_from_centre(image, test_angle)
# TODO: This isn't true in some edge cases
assert(found_hz)
if abs(test_d) < abs(target_d):
a1 = test_angle
elif abs(test_d) > abs(target_d):
a2 = test_angle
else:
break
symmetric_angle = 0.5 * (a1 + a2)
found_hz, d = ray_from_centre(image, symmetric_angle)
assert(found_hz)
angle_to_vertex = 0.5 * (base_angle + symmetric_angle)
found_hz, vertex_d = ray_from_centre(image, angle_to_vertex)
assert(found_hz)
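        # Convert the vertex's pixel offset into an angle above the horizon
        # using the 57-degree field of view, then add Earth's angular radius
        # (6371 km radius seen from a 6871 km orbit) to get the off-nadir tilt.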
angle_above_hz = math.atan((vertex_d / (0.5 * len(image[0]))) * math.tan(0.5 * 57 * math.pi / 180))
earth_angular_radius = math.asin(6371.0 / 6871.0)
theta_x = earth_angular_radius + angle_above_hz
if angle_above_hz > 0:
theta_z = angle_to_vertex + 0.5 * math.pi
else:
theta_z = angle_to_vertex - 0.5 * math.pi
nadir = np.matrix([0, 0, -1]).T
th_x = -theta_x;
th_z = theta_z;
Rx = np.matrix([[1, 0, 0],
[0, math.cos(th_x), -1*math.sin(th_x)],
[0, math.sin(th_x), math.cos(th_x)]])
Rz = np.matrix([[math.cos(th_z), -1*math.sin(th_z), 0],
[math.sin(th_z), math.cos(th_z), 0],
[0, 0, 1]])
nadir = Rz * Rx * nadir;
with open(data_folder + '/' + da, "rb") as f:
chunk = f.read(44)
m = struct.unpack('ffiifffffff',chunk)
nadir_actual = np.matrix([m[8], m[9], m[10]])
angle_error = math.acos(nadir_actual * nadir)
print("Error: " + str(angle_error * 180 / math.pi) + " degrees")
print("Lines searched: " + str(lines_searched))
if __name__ == "__main__":
main()
```
|
{
"source": "JeffStrickland/Cryptopals",
"score": 4
}
|
#### File: JeffStrickland/Cryptopals/c7.py
```python
import base64
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
def ecb_decrypt_function(dcfile, key): # Function decrypts data
cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())
decryptor = cipher.decryptor()
decrypted_data = decryptor.update(dcfile) + decryptor.finalize()
message = decrypted_data
return message
def main(): # Read encrypted file, decrypt with decryption function, write decrypted data to new file
key = b"YELLOW SUBMARINE" # given
with open("7.txt") as f1, open('decrypted', 'w') as f2:
data = base64.b64decode(f1.read())
z = ecb_decrypt_function(data, key).decode()
f2.write(z)
        # Writing decrypted data to new file is more
# practical than printing in the terminal
if __name__ == '__main__':
main()
'''
Solution is written to a text file named 'decrypted'.
SOLUTION -->
I'm back and I'm ringin' the bell
A rockin' on the mike while the fly girls yell
In ecstasy in the back of me
Well that's my DJ Deshay cuttin' all them Z's
Hittin' hard and the girlies goin' crazy
Vanilla's on the mike, man I'm not lazy.
I'm lettin' my drug kick in
It controls my mouth and I begin
To just let it flow, let my concepts go
My posse's to the side yellin', Go Vanilla Go!
Smooth 'cause that's the way I will be
And if you don't give a damn, then
Why you starin' at me
So get off 'cause I control the stage
There's no dissin' allowed
I'm in my own phase
The girlies sa y they love me and that is ok
And I can dance better than any kid n' play
Stage 2 -- Yea the one ya' wanna listen to
It's off my head so let the beat play through
So I can funk it up and make it sound good
1-2-3 Yo -- Knock on some wood
For good luck, I like my rhymes atrocious
Supercalafragilisticexpialidocious
I'm an effect and that you can bet
I can take a fly girl and make her wet.
I'm like Samson -- Samson to Delilah
There's no denyin', You can try to hang
But you'll keep tryin' to get my style
Over and over, practice makes perfect
But not if you're a loafer.
You'll get nowhere, no place, no time, no girls
Soon -- Oh my God, homebody, you probably eat
Spaghetti with a spoon! Come on and say it!
VIP. Vanilla Ice yep, yep, I'm comin' hard like a rhino
Intoxicating so you stagger like a wino
So punks stop trying and girl stop cryin'
Vanilla Ice is sellin' and you people are buyin'
'Cause why the freaks are jockin' like Crazy Glue
Movin' and groovin' trying to sing along
All through the ghetto groovin' this here song
Now you're amazed by the VIP posse.
Steppin' so hard like a German Nazi
Startled by the bases hittin' ground
There's no trippin' on mine, I'm just gettin' down
Sparkamatic, I'm hangin' tight like a fanatic
You trapped me once and I thought that
You might have it
So step down and lend me your ear
'89 in my time! You, '90 is my year.
You're weakenin' fast, YO! and I can tell it
Your body's gettin' hot, so, so I can smell it
So don't be mad and don't be sad
'Cause the lyrics belong to ICE, You can call me Dad
You're pitchin' a fit, so step back and endure
Let the witch doctor, Ice, do the dance to cure
So come up close and don't be square
You wanna battle me -- Anytime, anywhere
You thought that I was weak, Boy, you're dead wrong
So come on, everybody and sing this song
Say -- Play that funky music Say, go white boy, go white boy go
play that funky music Go white boy, go white boy, go
Lay down and boogie and play that funky music till you die.
Play that funky music Come on, Come on, let me hear
Play that funky music white boy you say it, say it
Play that funky music A little louder now
Play that funky music, white boy Come on, Come on, Come on
Play that funky music
'''
```
#### File: JeffStrickland/Cryptopals/c9.py
```python
x = b'YELLOW SUBMARINE'
def pkcs_padding(text, block_size):
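    # PKCS#7 padding: append N bytes, each of value N, where N is the number of
    # bytes needed to reach the next block boundary (a full block of padding is
    # added when the text is already block-aligned).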
length_of_padding = block_size - (len(text) % block_size)
if length_of_padding == 0:
length_of_padding = block_size
padding = bytes([length_of_padding]) * length_of_padding
return text + padding
# Test it out
print(pkcs_padding(x, 20))
# SOLUTION --> b'YELLOW SUBMARINE\x04\x04\x04\x04'
```
|
{
"source": "jeffsuto/Dashboard-Smart-Car-With-Map",
"score": 3
}
|
#### File: jeffsuto/Dashboard-Smart-Car-With-Map/ThreadEngine.py
```python
from queue import Queue
from threading import Thread
from PyQt5.QtCore import pyqtSignal
import random
import time
import data
global kecepatan
global jarak_tempuh
global temp_jarak_tempuh
global daya
kecepatan = 0
jarak_tempuh = 0
temp_jarak_tempuh = 0
daya = 30000
pesan_aksi_sensor_jarak = ''
def sensorDepan(queueDepan):
while True:
value = random.randint(100, 3000)
queueDepan.put(value)
ui.txtSensorDepan.setText(translate("MainWindow", str(value/100)))
time.sleep(1)
def sensorBelakang(queueBelakang):
while True:
value = random.randint(200, 3000)
queueBelakang.put(value)
ui.txtSensorBelakang.setText(translate("MainWindow", str(value/100)))
time.sleep(1)
def sensorKanan(queueKanan):
while True:
value = random.randint(50, 500)
queueKanan.put(value)
ui.txtSensorKanan.setText(translate("MainWindow", str(value/100)))
time.sleep(1)
def sensorKiri(queueKiri):
while True:
value = random.randint(50, 500)
queueKiri.put(value)
ui.txtSensorKiri.setText(translate("MainWindow", str(value/100)))
time.sleep(1)
def sensorDepanKiri(queueDepanKiri):
while True:
value = random.randint(50, 500)
queueDepanKiri.put(value)
ui.txtSensorDepanKiri.setText(translate("MainWindow", str(value/100)))
time.sleep(1)
def sensorDepanKanan(queueDepanKanan):
while True:
value = random.randint(50, 500)
queueDepanKanan.put(value)
ui.txtSensorDepanKanan.setText(translate("MainWindow", str(value/100)))
time.sleep(1)
def tambahKecepatan(up):
global kecepatan
kecepatan += up
def kurangiKecepatan(down):
global kecepatan
kecepatan -= down
def kontrolJarak(sensor_depan, sensor_kiri, sensor_kanan, sensor_depan_kanan, sensor_depan_kiri):
global kecepatan
global pesan_aksi_sensor_jarak
pesan_aksi_sensor_jarak = 'Jarak aman'
if sensor_depan in range(0, 200):
pesan_aksi_sensor_jarak = 'Jarak depan terlalu dekat. rem perlahan'
if kecepatan in range(30, 60):
kurangiKecepatan(20)
if sensor_kiri in range(0, 200):
pesan_aksi_sensor_jarak = 'Geser ke kanan'
if sensor_kanan in range(0, 200):
pesan_aksi_sensor_jarak = 'Geser ke kiri'
if sensor_depan_kanan in range(50, 150):
pesan_aksi_sensor_jarak = 'Geser serong kiri'
if sensor_depan_kiri in range(50, 150):
pesan_aksi_sensor_jarak = 'Geser serong kanan'
def kontrolKecepatan(sensor_depan):
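    # Adjust the target speed from the front distance sensor reading: large
    # clearances allow accelerating, short clearances force the speed down.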
global kecepatan
if sensor_depan in range(1000, 3000):
if kecepatan > 65:
kurangiKecepatan(5)
else:
tambahKecepatan(7)
elif sensor_depan in range(700, 1000):
if kecepatan > 65:
kecepatan = 45
kurangiKecepatan(5)
else:
tambahKecepatan(4)
elif sensor_depan in range(400, 700):
if kecepatan > 40:
kecepatan = 30
kurangiKecepatan(5)
else:
tambahKecepatan(2)
elif sensor_depan in range(200, 400):
if kecepatan > 40:
kecepatan = 20
kurangiKecepatan(5)
else:
tambahKecepatan(2)
def kontrolDaya():
global kecepatan
global daya
if daya > 0:
daya -= int(kecepatan / 3.6)
else:
time.sleep(10)
def GPS(queueGPS):
global temp_jarak_tempuh
global kecepatan
index = 0
mode = 'berangkat'
while True :
temp_jarak_tempuh += int(kecepatan / 3.6)
if temp_jarak_tempuh >= 10:
if index == len(data.coordinates)-1:
mode = 'pulang'
kecepatan = 0
ui.txtSpeedometer.setText(translate("MainWindow", str(kecepatan)))
time.sleep(2)
elif index == 0:
mode = 'berangkat'
kecepatan = 0
ui.txtSpeedometer.setText(translate("MainWindow", str(kecepatan)))
time.sleep(2)
if mode == 'berangkat':
index += 1
else:
index -= 1
temp_jarak_tempuh = 0
# set latitude and longitude
ui.txtLatitude.setText(translate("MainWindow", str(data.coordinates[index][0])))
ui.txtLongitude.setText(translate("MainWindow", str(data.coordinates[index][1])))
queueGPS.put(data.coordinates[index])
time.sleep(1)
def masterKontrol(queueDepan, queueBelakang, queueKanan, queueKiri, queueDepanKanan, queueDepanKiri, queueGPS):
global daya
global jarak_tempuh
global pesan_aksi_sensor_jarak
global kecepatan
global barDaya
ui.txtTotalDistance.setText(translate("MainWindow", str(int(data.getTotalDistance()))+' m'))
while True:
depan = queueDepan.get()
belakang = queueBelakang.get()
kanan = queueKanan.get()
kiri = queueKiri.get()
depanKanan = queueDepanKanan.get()
depanKiri = queueDepanKiri.get()
GPS = queueGPS.get()
        # display the current speed
kontrolKecepatan(depan)
ui.txtSpeedometer.setText(translate("MainWindow", str(kecepatan)))
        # display the distance traveled
jarak_tempuh += int(kecepatan / 3.6)
ui.txtMileage.setText(translate("MainWindow", str(jarak_tempuh)).zfill(10))
        # display the remaining power
kontrolDaya()
barDaya.emit(daya)
barDaya.connect(ui.energyBar.setValue)
        # display the distance-sensor action message
kontrolJarak(depan, kiri, kanan, depanKanan, depanKiri)
ui.txtSensorActionMessage.setText(translate("MainWindow", pesan_aksi_sensor_jarak))
# GPS
ui.marker.setLatLng(GPS)
ui.map.setView(GPS, 20)
def go(_ui, _translate, progress):
global ui
global translate
global barDaya
ui = _ui
translate = _translate
barDaya = progress
queueDepan = Queue()
queueBelakang = Queue()
queueKanan = Queue()
queueKiri = Queue()
queueDepanKanan = Queue()
queueDepanKiri = Queue()
queueGPS = Queue()
threadDepan = Thread(target=sensorDepan, args=(queueDepan,))
threadBelakang = Thread(target=sensorBelakang, args=(queueBelakang,))
threadKanan = Thread(target=sensorKanan, args=(queueKanan,))
threadKiri = Thread(target=sensorKiri, args=(queueKiri,))
threadDepanKanan = Thread(target=sensorDepanKanan, args=(queueDepanKanan,))
threadDepanKiri = Thread(target=sensorDepanKiri, args=(queueDepanKiri,))
threadGPS = Thread(target=GPS, args=(queueGPS,))
threadMasterKontrol = Thread(target=masterKontrol, args=(queueDepan, queueBelakang, queueKanan, queueKiri, queueDepanKanan, queueDepanKiri, queueGPS,))
threadDepan.start()
threadBelakang.start()
threadKanan.start()
threadKiri.start()
threadDepanKanan.start()
threadDepanKiri.start()
threadGPS.start()
threadMasterKontrol.start()
```
|
{
"source": "jeffswt/comTeXT",
"score": 3
}
|
#### File: comTeXT/comtext/error.py
```python
class ParserError(RuntimeError):
"""Raises error while intepreting document, should be catched and displayed
to user."""
default_value = {
'file': '', # filename of error occurence
'path': '', # path of file
'row': 0, # error occured at row #0
'col': 0, # error occured at column #0
'cause': '', # reason of error
}
def __init__(self, cause):
self.__parser_cause__ = cause
return
def cause(self):
return self.__parser_cause__
pass
```
#### File: comTeXT/comtext/__init__.py
```python
from . import loader
def parse(path, target, preload_libs=[], include_path=None):
return loader.parse_file(path, target, preload_libs=preload_libs,
include_path=include_path)
```
#### File: comTeXT/comtext/modules.py
```python
import os
from . import jitfunction
from . import keywords
from . import lang
from . import misc
from . import kernel
from .error import ParserError
class ParserFunction:
"""Function executed at certain substring occurences while parsing."""
def __init__(self):
return
def parse(self, parser, state):
return
pass
class PfChEscape(ParserFunction):
def parse(self, parser, state):
print(list(state.macros))
err_msg = lang.text('Parser.Error.Function.UnknownFunction')
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
pass
class PfComment(ParserFunction):
def parse(self, parser, state):
kwpos = parser.match_next_keyword(state, state.pos, '\n')
comment = ''
for i in range(state.pos, kwpos):
comment += state.document[i]
state.shift_forward_mul(comment)
return ''
pass
class PfScopeBegin(ParserFunction):
def parse(self, parser, state):
err_msg = lang.text('Parser.Error.Scope.UnexpectedBeginMarker') %\
keywords.scope_begin
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
pass
class PfScopeEnd(ParserFunction):
def parse(self, parser, state):
err_msg = lang.text('Parser.Error.Scope.UnexpectedEndMarker') %\
keywords.scope_end
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
pass
class PfChEscapedCharacter(ParserFunction):
def __init__(self, mapping):
self.mapping = mapping
return
def parse(self, parser, state):
return self.mapping.get(state.target, self.mapping['ctx'])
pass
class PfLoadLibrary(ParserFunction):
@staticmethod
def load_library(parser_i, state, module_name):
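        # Search the include path for the module under the known comTeXT file
        # extensions, parse it with a sub-parser, and merge its macros into the
        # caller's state; already-loaded libraries are skipped.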
fpath = os.path.dirname(module_name)
fname = os.path.basename(module_name)
found = False
for folder in parser_i.include_path:
for ext in keywords.ctx_file_extensions:
npath = os.path.join(folder, fpath)
nname = fname + '.' + ext
if os.path.isfile(os.path.join(npath, nname)):
fpath, fname = npath, nname
found = True
break
if found:
break
# no module found
if not found:
err_msg = lang.text('Parser.Error.Library.FileNotFound')
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
# skip if re-importing an available library
absp = os.path.join(fpath, fname)
if absp in parser_i.loaded_libraries:
return ''
parser_i.loaded_libraries.add(absp)
# parse library and load functions into file
fhandle = open(absp, 'r', encoding=keywords.ctx_file_encoding)
fcontent = fhandle.read()
fhandle.close()
subp = kernel.Parser(filepath=fpath,
filename=fname,
document=fcontent,
target=parser_i.target,
include_path=[fpath] + parser_i.include_path)
subp.parse(functions=state.macros)
state.macros = subp.state.macros
return
def parse(self, parser_i, state):
module_name = parser_i.match_verbatim_scope(state)
PfLoadLibrary.load_library(parser_i, state, module_name)
return ''
pass
class PfDefFunction(ParserFunction):
@staticmethod
def check_brackets(parser, state, text):
err_msg = ''
if not text.startswith(keywords.func_param_left):
err_msg = lang.text('Parser.Error.Scope.'
'ExpectedBeginMarker') %\
keywords.func_param_left
text = text[len(keywords.func_param_left):]
if not text.endswith(keywords.func_param_right):
err_msg = lang.text('Parser.Error.Scope.'
'ExpectedEndMarker') %\
keywords.func_param_right
text = text[:-len(keywords.func_param_right)]
if text.find(keywords.func_param_left) != -1:
err_msg = lang.text('Parser.Error.Scope.'
'UnexpectedBeginMarker') %\
keywords.func_param_left
if text.find(keywords.func_param_right) != -1:
err_msg = lang.text('Parser.Error.Scope.'
'UnexpectedEndMarker') %\
keywords.func_param_right
if err_msg != '':
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
return text.strip()
@staticmethod
def pfd_lang_args(parser, state, param, out):
found = False
# language and arguments
for l in keywords.func_lang:
if not param.startswith(l):
continue
found = True
# already has language (conflict)?
if out['lang'] != '':
err_msg = lang.text('Parser.Error.Function.'
'ConflictLanguage')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
# check brackets
args = param[len(l):]
args = PfDefFunction.check_brackets(parser, state, args)
# parse arguments
args = list(i.strip() for i in args.split(keywords.
func_param_split)) if args != '' else []
out['lang'] = l
for arg in args:
verbatim = False
if arg.startswith(keywords.func_param_verbatim):
arg = arg[len(keywords.func_param_verbatim):].strip()
verbatim = True
# check argument name validity
for ch in keywords.func_param_forbid_chars:
if ch not in arg:
continue
err_msg = lang.text('Parser.Error.Function.ForbidChar')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path':
state.filepath, 'cause': err_msg})
# valid and push in
out['args'].append({'name': arg, 'verbatim': verbatim})
pass
return found
@staticmethod
def pfd_function_mode(parser, state, param, out):
if param in keywords.func_proc:
if out['mode'] != '':
err_msg = lang.text('Parser.Error.Function.ConflictMode')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
out['mode'] = param
return True
return False
@staticmethod
def pfd_auto_break(parser, state, param, out):
if param in keywords.func_brk:
if out['autobreak'] != '':
err_msg = lang.text('Parser.Error.Function.ConflictBreak')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
out['autobreak'] = param
return True
return False
@staticmethod
def parse_function_def(parser, state, text):
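        # A definition has the form '<name> <def marker> <param>, <param>, ...';
        # the definition marker must appear exactly once, and each parameter is
        # then interpreted as a language/arguments spec, a processing mode, or
        # an auto-break setting.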
# get function name
func_name, *fdm_args = text.split(keywords.func_def_marker)
if len(fdm_args) == 0:
err_msg = lang.text('Parser.Error.Function.MissingDefMarker')
raise ParserError({'row': state.row, 'col': state.col, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
if len(fdm_args) > 1:
err_msg = lang.text('Parser.Error.Function.TooManyDefMarkers')
raise ParserError({'row': state.row, 'col': state.col +
len(func_name) + len(fdm_args[0]) - 1 +
len(keywords.func_def_marker), 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
func_name = func_name.strip()
params = list(i.strip() for i in fdm_args[0].split(
keywords.func_def_split))
# parse params
out = {
'name': func_name,
'lang': '',
'args': [], # {'name': '...', 'verbatim': True}
'mode': '',
'autobreak': '',
}
for param in params:
if PfDefFunction.pfd_lang_args(parser, state, param, out):
continue
if PfDefFunction.pfd_function_mode(parser, state, param, out):
continue
if PfDefFunction.pfd_auto_break(parser, state, param, out):
continue
# unknown parameter
err_msg = lang.text('Parser.Error.Function.UnknownParam')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
# default values
if out['lang'] == '':
out['lang'] = keywords.func_lang_raw
if out['mode'] == '':
out['mode'] = keywords.func_proc_src_after
if out['autobreak'] == '':
out['autobreak'] = keywords.func_brk_wrapinblk
return out
@staticmethod
def parse_function(parser, state):
# get indentation
indent = parser.get_current_indent(state)
# analyze function description
func_desc = parser.match_verbatim_scope(state)
params = PfDefFunction.parse_function_def(parser, state, func_desc)
# enforce code block format
if parser.match_to_next_occurence(state, '{') != '':
err_msg = lang.text('Parser.Error.Scope.ExpectedBeginMarker')
raise ParserError({'row': state.row, 'col': state.col, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
if state.document[state.pos] != '\n':
err_msg = lang.text('Parser.Error.Function.ExpectedLineBreak')
raise ParserError({'row': state.row, 'col': state.col + 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
content = parser.match_to_next_occurence(state, '\n' + ' ' * indent +
keywords.scope_end)
if len(content) > 0:
content = content[1:]
# checking content indentation
lines = content.split('\n')
min_indent = misc.get_block_indent(content)
if min_indent <= indent:
err_msg = lang.text('Parser.Error.Scope.Outdented') % indent
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
code = '\n'.join(i[min_indent:] for i in lines)
return params, code
@staticmethod
def available_modes(parser):
modes = {keywords.func_proc_src_after,
{'doc': keywords.func_proc_doc_after,
'web': keywords.func_proc_web_after
}.get(parser.target, '')}
return modes
def parse(self, parser, state):
params, code = PfDefFunction.parse_function(parser, state)
# retrieve dynamic function
fname = keywords.kw_dyn_function % params['name']
if state.has_function(fname):
is_new = False
func = state.get_function_by_name(fname)
else:
is_new = True
func = PfDynamicFunction()
# update parameters and code
if not func.update_config(params):
err_msg = lang.text('Parser.Error.Function.ParamMismatch')
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
func.update_function(parser, state, params, code)
if is_new and params['mode'] in PfDefFunction.available_modes(parser):
state.add_function(fname, func)
return ''
pass
class PfDefEnvironment(ParserFunction):
def parse(self, parser, state):
params, code = PfDefFunction.parse_function(parser, state)
# retrieve dynamic function
fname = keywords.kw_dyn_environment_begin % params['name']
if state.has_function(fname):
is_new = False
func = state.get_function_by_name(fname)
else:
is_new = True
func = PfDynamicEnvironment()
# addition limits
if len(params['args']) == 0:
err_msg = lang.text('Parser.Error.Environment.TooFewArgs')
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
if not params['args'][-1]['verbatim']:
err_msg = lang.text('Parser.Error.Environment.LastMustVerbatim')
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
# update parameters and code
if not func.update_config(params):
err_msg = lang.text('Parser.Error.Function.ParamMismatch')
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
func.update_function(parser, state, params, code)
if is_new and params['mode'] in PfDefFunction.available_modes(parser):
state.add_function(fname, func)
return ''
pass
class PfEnvironmentBegin(ParserFunction):
def parse(self, parser, state):
err_msg = lang.text('Parser.Error.Environment.UnknownEnvironment')
raise ParserError({'row': state.row, 'col': state.col, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
pass
class PfEnvironmentEnd(ParserFunction):
def parse(self, parser, state):
err_msg = lang.text('Parser.Error.Scope.UnexpectedEndMarker') %\
keywords.kw_environment_end
raise ParserError({'row': state.row, 'col': state.col - 1, 'file':
state.filename, 'path': state.filepath,
'cause': err_msg})
pass
class PfDynamicFunction(ParserFunction):
def __init__(self):
self.function_name = None
self.args_vb = None # verbatim parse or not
self.mode = None
self.py_func = None
self.raw_func = None
self.autobreak = None
return
def update_config(self, params):
"""Update configuration of current function. Newly updated parameters
must be the same as the original.
@param params(dict(...)) taken from PfDefFunction
@returns bool True if modification succeeded"""
if self.function_name is not None:
if self.function_name != params['name']:
return False
if len(self.args_vb) != len(params['args']):
return False
for i in range(0, len(self.args_vb)):
                if self.args_vb[i] != params['args'][i]['verbatim']:
return False
if self.mode != params['mode']:
return False
if self.autobreak != params['autobreak']:
return False
return True
self.function_name = params['name']
self.args_vb = list(i['verbatim'] for i in params['args'])
self.mode = params['mode']
self.autobreak = params['autobreak']
return True
def update_function(self, parser, state, params, code):
fname = params['name']
args = list(i['name'] for i in params['args'])
if params['lang'] == keywords.func_lang_py:
if self.py_func is not None:
err_msg = lang.text('Parser.Error.Function.ConflictCode')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
self.py_func = jitfunction.JitFunctionPy(fname, args, code)
elif params['lang'] == keywords.func_lang_raw:
if self.raw_func is not None:
err_msg = lang.text('Parser.Error.Function.ConflictCode')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
self.raw_func = jitfunction.JitFunctionRaw(fname, args, code)
return
def check_do_exec(self, state):
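        # Execute the user-defined body only when the function's processing
        # mode matches the current output target ('ctx', 'doc' or 'web').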
return (state.target, self.mode) in {
('ctx', keywords.func_proc_src_after),
('doc', keywords.func_proc_doc_after),
('web', keywords.func_proc_web_after)}
def process_break(self, parser, state):
# process autobreak
res = parser.flush_auto_break(state)
if state.autobreak.enabled:
if self.autobreak == keywords.func_brk_wrapinblk:
res += parser.open_auto_break(state)
elif self.autobreak == keywords.func_brk_leaveblk:
res += parser.close_auto_break(state)
return res
def call_function(self, parser, state, do_exec, args, res):
tmp = ''
if do_exec:
if self.raw_func is not None:
tmp = str(self.raw_func.eval(*args))
elif self.py_func is not None:
tmp = str(self.py_func.eval(*args))
if state.target == 'ctx':
tmp = parser.parse_blob(state, tmp)
res += tmp
return res
def parse(self, parser, state):
do_exec = self.check_do_exec(state)
res = self.process_break(parser, state)
# load arguments
prev_abs = state.autobreak.enabled
state.autobreak.enabled = False
args = []
for verbatim in self.args_vb:
if verbatim:
args.append(parser.match_verbatim_scope(state))
else:
args.append(parser.match_parsable_scope(state))
state.autobreak.enabled = prev_abs
# call function
res = self.call_function(parser, state, do_exec, args, res)
# execute this if not executing
if not do_exec:
res += keywords.kw_dyn_function % self.function_name + ''.join(
(keywords.scope_begin + i + keywords.scope_end)
for i in args)
state.exec_count -= 1
return res
pass
class PfDynamicEnvironment(ParserFunction):
def __init__(self):
self.function_name = None
self.args_vb = [] # verbatim parse or not
self.mode = None
self.py_func = None
self.raw_func = None
return
def update_config(self, params):
return PfDynamicFunction.update_config(self, params)
def update_function(self, parser, state, params, code):
return PfDynamicFunction.update_function(self, parser, state,
params, code)
def parse(self, parser, state):
do_exec = PfDynamicFunction.check_do_exec(self, state)
res = PfDynamicFunction.process_break(self, parser, state)
# load arguments
prev_abs = state.autobreak.enabled
state.autobreak.enabled = False
args = []
indent = parser.get_current_indent(state)
for verbatim in self.args_vb[:-1]:
if verbatim:
args.append(parser.match_verbatim_scope(state))
else:
args.append(parser.match_parsable_scope(state))
if state.document[state.pos] != '\n':
err_msg = lang.text('Parser.Error.Environment.ExpectedLineBreak')
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
state.shift_forward('\n')
fn_end = keywords.kw_dyn_environment_end % self.function_name
args.append(parser.match_to_next_occurence(state, '\n' + ' ' * indent +
fn_end, sub_display_error=fn_end))
state.autobreak.enabled = prev_abs
# process indentation on last one
has_indent = args[-1]
args[-1] = '\n'.join(i[indent:] for i in has_indent.split('\n'))
# call function
res = PfDynamicFunction.call_function(self, parser, state,
do_exec, args, res)
# execute this if not executing
if not do_exec:
res += keywords.kw_dyn_environment_begin % self.function_name
res += ''.join((keywords.scope_begin + i + keywords.scope_end) for
i in args[:-1])
res += '\n' + args[-1] + '\n' + fn_end
state.exec_count -= 1
return res
pass
class PfParagraph(ParserFunction):
def parse(self, parser, state):
output = parser.open_auto_break(state, reopen=True)
prev_abs = state.autobreak.enabled
state.autobreak.enabled = False
tmp = parser.match_parsable_scope(state)
        state.autobreak.enabled = prev_abs
output += tmp + parser.close_auto_break(state)
return output
pass
class PfMathMode(ParserFunction):
def parse(self, parser, state):
# process auto break
res = parser.flush_auto_break(state)
if state.autobreak.enabled:
res += parser.open_auto_break(state)
# retrieve contents
escaped = False
found = False
output = ''
mark = keywords.ch_esc_chars['dollar']['default']
while state.pos < len(state.document):
ch = state.document[state.pos]
if ch == keywords.ch_escape:
escaped = not escaped
elif ch == mark:
if not escaped:
state.shift_forward(ch)
found = True
break
else:
escaped = False
output += ch
state.shift_forward(ch)
# no end marker
if not found:
err_msg = lang.text('Parser.Error.Scope.ExpectedEndMarker') % mark
raise ParserError({'row': state.row, 'col': state.col - 1,
'file': state.filename, 'path': state.
filepath, 'cause': err_msg})
# process
if state.target == 'doc':
res += keywords.math_mode_doc % output
elif state.target == 'web':
res += keywords.math_mode_web % (output.replace('<', '\\lt').
                                             replace('>', '\\gt'))
else:
res += keywords.math_mode_ctx % output
return res
pass
```
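The `params` dict consumed by `update_config` and `update_function` above is assembled by `PfDefFunction` earlier in this file, so only its accesses are visible here. The sketch below reconstructs the assumed shape from those accesses; the `mode`, `autobreak` and `lang` values stand in for constants defined in the `keywords` module and are placeholders rather than the real keyword strings.
```python
# Hedged sketch of the params dict PfDynamicFunction expects.
params = {
    'name': 'mycmd',                            # dynamic function name
    'args': [{'name': 'x', 'verbatim': False},  # one entry per argument
             {'name': 'y', 'verbatim': True}],
    'mode': 'proc-doc-after',    # placeholder for a keywords.func_proc_* value
    'autobreak': 'leave-block',  # placeholder for a keywords.func_brk_* value
    'lang': 'py',                # placeholder for keywords.func_lang_py
}

func = PfDynamicFunction()
assert func.update_config(params)  # first call records the signature
assert func.update_config(params)  # identical re-definition is accepted
```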
|
{
"source": "jeffswt/hitwifi-automata",
"score": 2
}
|
#### File: jeffswt/hitwifi-automata/libhitwa.py
```python
import base64
import cryptography.hazmat.primitives.ciphers
import cryptography.hazmat.backends
import json
import os
import platform
import requests
import re
import socket
import subprocess
import urllib
__all__ = [
'get_msg_lang',
'transcript_data',
'net_login',
'net_logout',
'NetworkConnectivityBuffer',
]
def get_msg_lang(msg, locale='en'):
""" get_msg_lang(msg, locale='en'): Retrieve language string for codename
@param msg <- str: the codename
@param locale <- str: language to retrieve
@return msg -> str: translated string """
langs = {'en', 'zh', 'jp'}
lang_list = {
'ALREADY_ONLINE': {
'en': 'The device is already connected to the Internet.',
'zh': '设备已连接到互联网。',
'jp': 'このデバイスはすでにインターネットに接続されています。',
},
'ALREADY_OFFLINE': {
'en': 'The device is already disconnected.',
'zh': '用户已登出校园网。',
'jp': 'このデバイスはすでにインターネットから切断されています。',
},
'NO_NETWORK': {
'en': 'The device is not properly connected to HIT campus '
'network (or any).',
'zh': '用户不处于校园网环境中。',
'jp': 'このデバイスがキャンパスネットワークに適切に接続'
'されていません。',
},
'MISSING_EPORTAL': {
'en': 'Cannot locate the ePortal address.',
'zh': '无法获取认证服务器地址。',
'jp': 'イーポータルアドレスが見つかりません。',
},
'NO_REPONSE': { # No Response (
'en': 'ePortal server did not response.',
'zh': '认证服务器未应答。',
'jp': '認証サーバーが応答しませんでした。',
},
'EMPTY_USERNAME': {
'en': 'Username should not be empty.',
'zh': '用户名不得为空。',
'jp': 'ユーザー名は空にしないでください。',
},
'EMPTY_PASSWORD': {
'en': 'Password should not be empty.',
'zh': '密码不得为空。',
'jp': 'パスワードは空にしないでください。',
},
'INCORRECT_USERNAME': {
'en': 'The user does not exist.',
'zh': '用户名不存在。',
'jp': 'ユーザーが存在しません。',
},
'INCORRECT_PASSWORD': {
'en': 'The password is incorrect.',
'zh': '密码输入错误。',
'jp': 'パスワードが間違っています。',
},
'LOGIN_SUCCESS': {
'en': 'Successfully connected to HIT campus network!',
'zh': '成功连接到校园网!',
'jp': 'ログインに成功しました!',
},
'LOGOUT_SUCCESS': {
'en': 'Successfully disconnected!',
'zh': '已登出校园网。',
'jp': 'ログアウトしました!',
},
'LOGOUT_FAILED': {
'en': 'Failed to logout (what the ****)',
'zh': '登出失败 (smg)',
'jp': 'ログアウトに失敗しました (なに)',
},
}
if msg not in lang_list:
return msg
if locale not in langs:
locale = 'en'
return lang_list[msg][locale]
def parse_url(url):
""" parse_url(url): Parse URL according to urllib.parse
@param url <- str: the URL string
@return components -> dict(str: *): the URL components """
_1, _2, _3, _4, _5, _6 = urllib.parse.urlparse(url)
_5 = urllib.parse.parse_qs(_5, keep_blank_values=True)
return {
'scheme': _1,
'netloc': _2,
'path': _3,
'params': _4,
'query': _5,
'fragment': _6,
}
def join_query(queries):
""" join_query(queries): Join urllib queries
@param queries <- dict(str: list(str)): queries parsed by
urllib.parse.parse_qs
@return qs -> str: the query string """
return '&'.join('&'.join(urllib.parse.quote(i) + '=' +
urllib.parse.quote(j) for j in queries[i])
for i in queries)
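# Hedged example of join_query, assuming Python 3.7+ dict ordering:
#     join_query({'userId': ['2021xxxxxx'], 'service': ['x y']})
# should produce 'userId=2021xxxxxx&service=x%20y' -- every value in each list
# becomes its own key=value pair and both sides are percent-encoded.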
def ping(host, timeout=1.0):
""" ping(host, timeout): Ping given host and return if accessible
@param host <- str: hostname
    @param timeout <- float: timeout in seconds """
    if platform.system().lower() == 'windows':
        params = ['ping.exe', host, '-n', '1',
                  '-w', str(int(timeout * 1000))]
        # STARTUPINFO only exists on Windows; it is used to hide the console
        si = subprocess.STARTUPINFO()
        si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        params = ['ping', host, '-c', '1',
                  '-w', str(timeout)]
        si = None
proc = subprocess.Popen(
args=params,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=si
)
try:
ret = proc.wait(timeout=timeout)
except subprocess.TimeoutExpired:
return False
return ret == 0
def transcript_data(data, encrypt=True):
if encrypt:
backend = cryptography.hazmat.backends.default_backend()
key = os.urandom(32) # key
iv = os.urandom(16) # initialization vector
# Generate encrypted message
cipher = cryptography.hazmat.primitives.ciphers.Cipher(
cryptography.hazmat.primitives.ciphers.algorithms.AES(key),
cryptography.hazmat.primitives.ciphers.modes.CBC(iv),
backend=backend)
encryptor = cipher.encryptor()
data = data.encode('utf-8')
padded_data = data + b'\x00' * (len(data) // 16 * 16 + 16 - len(data))
message = encryptor.update(padded_data) + encryptor.finalize()
# pack into data
blob = key + iv + (str(len(data)) + ';').encode('utf-8') + message
return base64.b64encode(blob).decode('utf-8')
else:
blob = base64.b64decode(data.encode('utf-8'))
# unpack values
key = blob[:32]
blob = blob[32:]
iv = blob[:16]
blob = blob[16:]
dsize = blob.split(b';')[0]
message = blob[(len(dsize) + 1):]
# retrieve message
backend = cryptography.hazmat.backends.default_backend()
cipher = cryptography.hazmat.primitives.ciphers.Cipher(
cryptography.hazmat.primitives.ciphers.algorithms.AES(key),
cryptography.hazmat.primitives.ciphers.modes.CBC(iv),
backend=backend)
decryptor = cipher.decryptor()
text = decryptor.update(message) + decryptor.finalize()
return text[:int(dsize.decode('utf-8'))].decode('utf-8')
return
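# Hedged round-trip example for transcript_data: assuming the function above,
#     token = transcript_data('hello', encrypt=True)
#     transcript_data(token, encrypt=False) == 'hello'
# The blob is base64(key[32] + iv[16] + b'<len>;' + AES-CBC ciphertext), so the
# key travels with the message; this is obfuscation, not real confidentiality.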
def net_login(username, password):
""" net_login(username, password): Login to HIT campus network
@param username <- str: the 10-digit username you would enter
@param password <- str: the password you specified
@return status -> bool: True if connected to network
@return message -> str: describes the reason related to status """
urls = {
'redirect': 'http://www.msftconnecttest.com/redirect',
'auth-ip': '172.16.17.32:8080',
'auth-domain': 'http://172.16.17.32:8080',
'auth-index': '/eportal/index.jsp',
'auth-login': '/eportal/InterFace.do?method=login'
}
# retrieve access point names
try:
req = requests.get(urls['redirect'])
req.encoding = 'utf-8'
except Exception as err:
return False, 'NO_NETWORK'
if 'https://go.microsoft.com/fwlink/' in req.text:
return True, 'ALREADY_ONLINE'
probable_urls = re.findall(r'[\'\"]([^\'\"]*?)[\'\"]', req.text)
eportal_url = list(filter(lambda x: x.startswith(urls['auth-domain'] +
urls['auth-index']),
probable_urls))
if len(eportal_url) == 0:
return False, 'MISSING_EPORTAL'
eportal_url = eportal_url[0]
# generate login query
post_query = {
'userId': [username],
'password': [password],
'service': [''],
'queryString': [urllib.parse.quote(join_query(parse_url(
eportal_url)['query']))],
'operatorPwd': [''],
'operatorUserId': [''],
'validcode': [''],
'passwordEncrypt': ['<PASSWORD>'],
}
post_string = join_query(post_query)
headers = {
'Host': urls['auth-ip'],
'Origin': urls['auth-domain'],
'Referer': eportal_url,
'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' +
'AppleWebKit/537.36 (KHTML, like Gecko) ' +
'Chrome/75.0.3770.142 Safari/537.36'),
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Content-Length': str(len(post_string)),
}
try:
req = requests.post(urls['auth-domain'] + urls['auth-login'],
data=post_string,
headers=headers)
req.encoding = 'utf-8'
except Exception as err:
return False, 'NO_NETWORK'
if type(req.text) != dict:
try:
result = json.loads(req.text)
except Exception as err:
return False, 'NO_REPONSE'
else:
result = req.text
if result.get('result', 'fail') != 'success':
msg = result.get('message', '')
info_map = {
('用户名不能为空', 'EMPTY_USERNAME'),
('用户不存在', 'INCORRECT_USERNAME'),
('用户密码错误', 'INCORRECT_PASSWORD'),
('密码不能为空', 'EMPTY_PASSWORD'),
}
for _ in info_map:
if msg.startswith(_[0]):
return False, _[1]
return False, msg
return True, 'LOGIN_SUCCESS'
def net_logout():
""" net_logout(): Logout from HIT campus network
@return status -> bool: True if disconnected from Internet
@return message -> str: describes the reason related to status """
urls = {
'auth-ip': '172.16.17.32:8080',
'auth-domain': 'http://172.16.17.32:8080',
'auth-login': '/eportal/InterFace.do?method=logout'
}
payload = ''
headers = {
'Host': urls['auth-ip'],
'Origin': urls['auth-domain'],
'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' +
'AppleWebKit/537.36 (KHTML, like Gecko) ' +
'Chrome/75.0.3770.142 Safari/537.36'),
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Content-Length': str(len(payload)),
}
try:
req = requests.post(urls['auth-domain'] + urls['auth-login'],
data=payload,
headers=headers)
req.encoding = 'utf-8'
except Exception as err:
return True, 'NO_NETWORK'
if type(req.text) != dict:
try:
result = json.loads(req.text)
except Exception as err:
return False, 'NO_REPONSE'
else:
result = req.text
if (result.get('result', '') == 'fail' and
result.get('message', '') == '用户已不在线'):
return True, 'ALREADY_OFFLINE'
if result.get('result', 'fail') != 'success':
return False, result.get('message', 'LOGOUT_FAILED')
return True, 'LOGOUT_SUCCESS'
class NetworkConnectivityBuffer:
def __init__(self):
self.buffers = []
self.buffer_size = 3
return
def check_connectivity(self, timeout=1.0):
www_addr = ('www.baidu.com', 80)
cnet_addr = ('172.16.17.32', 8080)
result = {
'any-network': False,
'campus-network': False,
'internet': False,
}
# determine if a network is connected
try:
socket.create_connection(www_addr, timeout=timeout)
result['any-network'] = True
except OSError:
return result
# check if is under campus network
try:
socket.create_connection(cnet_addr, timeout=timeout)
result['campus-network'] = True
except OSError:
pass
# check if is connected to internet
if ping(www_addr[0], timeout=timeout):
result['internet'] = True
return result
def update_status(self):
res = self.check_connectivity(timeout=0.5)
res_hash = '%d,%d,%d' % (res['any-network'], res['campus-network'],
res['internet'])
res_map = {
'1,0,1': 'wan-connected', # Wide Area Network
'1,1,0': 'can-disconnected', # Campus Area Network
'1,1,1': 'can-connected',
}
res = res_map.get(res_hash, 'no-network')
self.buffers.append(res)
if len(self.buffers) > self.buffer_size:
self.buffers.pop(0)
return
def get_status(self):
if len(self.buffers) < self.buffer_size:
return 'detecting'
res = 0
ranking = {
'paused': -2,
'detecting': -1,
'no-network': 0,
'wan-connected': 1,
'can-disconnected': 2,
'can-connected': 3,
}
inv_ranking = dict((ranking[i], i) for i in ranking)
for stat in self.buffers:
res = max(res, ranking[stat])
return inv_ranking[res]
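    # Hedged example of the smoothing above: with buffer_size = 3, fewer than
    # three samples yields 'detecting'; once the window is full, the most
    # "connected" state observed wins, e.g.
    #     ['no-network', 'can-connected', 'no-network'] -> 'can-connected'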
def clear_status(self):
self.buffers = []
return
pass
```
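A minimal usage sketch for the module above, assuming it is importable as `libhitwa` from the working directory; the credentials are placeholders, and the status strings are the ones produced by `NetworkConnectivityBuffer.update_status`.
```python
from libhitwa import (net_login, net_logout, get_msg_lang,
                      NetworkConnectivityBuffer)

buf = NetworkConnectivityBuffer()
for _ in range(buf.buffer_size):     # fill the smoothing window first
    buf.update_status()

status = buf.get_status()
if status == 'can-disconnected':     # on campus network but not authenticated
    ok, code = net_login('2021xxxxxx', 'password')  # placeholder credentials
    print(get_msg_lang(code, locale='zh'))
elif status == 'can-connected':      # authenticated; disconnect again
    ok, code = net_logout()
    print(get_msg_lang(code, locale='en'))
```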
|
{
"source": "jeffswt/yumina",
"score": 3
}
|
#### File: yumina/yumina/renderer.py
```python
import os
import subprocess
import threading
__global_phonogram_renderer = None
class PhonogramRenderer:
def __init__(self):
# javac -encoding utf-8 -classpath kuromoji.jar TokenizerCaller.java
# java -classpath ./kuromoji.jar;./TokenizerCaller.class; TokenizerCaller
self.proc = subprocess.Popen(
['java', '-classpath', './kuromoji.jar;./TokenizerCaller.class;', 'TokenizerCaller'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.th_lock = threading.Lock()
return
def __communicate__(self, cont, noread=False):
lines = []
gl_lock = threading.Lock()
gl_lock.acquire()
def out_thr(proc):
if noread:
gl_lock.release()
return
while True:
s = proc.stdout.readline().decode('utf-8', 'ignore').replace('\r', '').replace('\n', '')
if s == '__ANALYSIS_COMPLETE__':
break
lines.append(s)
gl_lock.release()
return
threading.Thread(target=out_thr, args=[self.proc]).start()
self.proc.stdin.write(cont.encode('utf-8', 'ignore'))
self.proc.stdin.flush()
gl_lock.acquire()
gl_lock.release()
return lines
def __match_romaji(self, a, b):
# align katakana to kanji, marking pronunciation
def kana_eq(p, q): # p is char, q is katakana
if p == q:
return True
if '\u3040' <= p <= '\u309f':
return chr(ord(p) - ord('\u3040') + ord('\u30a0')) == q
return False
# convert strings
M = a
N = b
m = len(M)
n = len(N)
R = []
# dynamic programming
dist = list(list((0, '') for j in range(0, n + 2)) for i in range(0, m + 2))
dist[0][0] = (0, '')
for i in range(0, m + 1):
dist[i][0] = (i, M[:i], ' ' * i)
for j in range(0, n + 1):
dist[0][j] = (j, ' ' * j, N[:j])
for i in range(1, m + 1):
for j in range(1, n + 1):
if kana_eq(M[i-1], N[j-1]):
dist[i][j] = (dist[i-1][j-1][0], dist[i-1][j-1][1] + M[i-1], dist[i-1][j-1][2] + N[j-1])
else:
mat_1 = dist[i][j-1][0] + 1
mat_2 = dist[i-1][j][0] + 1
mat_3 = dist[i-1][j-1][0] + 1
mat = min(mat_1, mat_2, mat_3)
if mat_2 == mat: # skip one
dist[i][j] = (dist[i-1][j][0] + 1, dist[i-1][j][1] + M[i-1], dist[i-1][j][2] + ' ')
elif mat_1 == mat: # add one
dist[i][j] = (dist[i][j-1][0] + 1, dist[i][j-1][1] + ' ', dist[i][j-1][2] + N[j-1])
else:
dist[i][j] = (dist[i-1][j-1][0] + 1, dist[i-1][j-1][1] + M[i-1], dist[i-1][j-1][2] + N[j-1])
_, s_src, s_prn = dist[m][n]
mode = 'kana' if kana_eq(s_src[0], s_prn[0]) else 'kanji'
s_src += '?'
s_prn += '?'
buff_l = s_src[0]
buff_r = s_prn[0]
for i in range(1, len(s_src)):
if mode == 'kanji' and (kana_eq(s_src[i], s_prn[i]) or s_src[i] == '?'):
R.append(('phonogram', buff_l.replace(' ', ''), buff_r.replace(' ', '')))
buff_l = buff_r = ''
mode = 'kana'
elif mode == 'kana' and (not kana_eq(s_src[i], s_prn[i]) or s_src[i] == '?'):
R.append(('regular', buff_l))
buff_l = buff_r = ''
mode = 'kanji'
buff_l += s_src[i]
buff_r += s_prn[i]
return R
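    # Hedged example of the alignment above: for the surface form '食べる' and
    # its katakana reading 'タベル' this should return
    #     [('phonogram', '食', 'タ'), ('regular', 'べる')]
    # i.e. kanji spans are paired with the reading fragment they map to, while
    # kana that already agrees with the reading passes through as regular text.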
def __direct_convert(self, cont, hiragana):
if '\n' in cont:
raise ValueError('should not contain line breaks')
q1 = []
for s in self.__communicate__(cont + '\n'):
a, b = tuple(s.split('__SPLIT__'))
if a == b:
q1.append(('regular', a))
elif b == '':
q1.append(('regular', a))
else:
q2 = self.__match_romaji(a, b)
for i in q2:
q1.append(i)
# convert katakana to hiragana
for i in range(0, len(q1)):
if q1[i][0] == 'phonogram':
                if len(q1[i]) < 3:
                    # malformed phonogram entry: degrade it to regular text
                    q1[i] = ('regular', q1[i][1])
                    continue
if hiragana:
q2 = list(q1[i][2])
for j in range(0, len(q2)):
if q2[j] not in {'ー'}:
q2[j] = chr(ord(q2[j]) - ord('\u30a0') + ord('\u3040'))
q1[i] = (q1[i][0], q1[i][1], ''.join(q2))
# joining similar items
q2 = []
for i in q1:
if len(q2) > 0 and i[0] == q2[-1][0] == 'regular':
q2[-1] = ('regular', q2[-1][1] + i[1])
else:
q2.append(i)
return q2
def convert(self, content, hiragana=False):
self.th_lock.acquire()
res = self.__direct_convert(content, hiragana)
self.th_lock.release()
return res
def close(self):
global __global_phonogram_renderer
self.proc.terminate()
self.proc.wait()
__global_phonogram_renderer = None
return
pass
def get_phonogram_renderer():
global __global_phonogram_renderer
if __global_phonogram_renderer == None:
__global_phonogram_renderer = PhonogramRenderer()
return __global_phonogram_renderer
def phoneticize(articles, phonogram_renderer=None, hiragana=False):
if phonogram_renderer == None:
phonogram_renderer = get_phonogram_renderer()
q1 = []
for line in articles:
if line[0] == 'break':
q1.append(line)
else:
            q1.append(('line', phonogram_renderer.convert(line[1][0][1], hiragana=hiragana)))
return q1
for i in {'kuromoji.jar', 'TokenizerCaller.class'}:
if not os.path.exists(i):
raise RuntimeError('requires "%s" to execute' % i)
```
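A minimal usage sketch for the renderer above, assuming `kuromoji.jar`, `TokenizerCaller.class` and a Java runtime are present in the working directory (the module checks for the first two at import time) and that the package is importable as `yumina`.
```python
from yumina import renderer

r = renderer.get_phonogram_renderer()
# tokens is a list of ('regular', text) and ('phonogram', text, reading) tuples
tokens = r.convert('私は学生です', hiragana=True)

article = [('line', [('regular', '吾輩は猫である')]), ('break',)]
annotated = renderer.phoneticize(article, phonogram_renderer=r, hiragana=True)
r.close()
```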
#### File: yumina/yumina/syosetu.py
```python
import bs4
import json
import os
import re
import requests
import sqlite3
from . import renderer
def get_webpage(*args, **kwargs):
""" get_webpage(...) -- request webpage content / text """
return requests.get(*args, **kwargs).text.encode('ISO-8859-1').decode('utf-8')
def map_num(s):
""" map_num(str) -- change all full-width characters to half-width. """
    s = s.replace('\uff10', '0')\
        .replace('\uff11', '1')\
        .replace('\uff12', '2')\
        .replace('\uff13', '3')\
        .replace('\uff14', '4')\
        .replace('\uff15', '5')\
        .replace('\uff16', '6')\
        .replace('\uff17', '7')\
        .replace('\uff18', '8')\
        .replace('\uff19', '9')\
        .replace('\u3000', ' ')
return s
def get_chapter_list(web_id):
sel_1 = r'<div class="chapter_title">.*?</div>'
sel_2 = r'<dd class="subtitle">\n<a href="/%s/\d+/">.*?</a>\n</dd>' % web_id
q1 = map_num(get_webpage('http://ncode.syosetu.com/%s/' % web_id))
q2 = re.findall('(%s|%s)' % (sel_1, sel_2), q1)
q3 = []
for i in q2:
if re.findall(sel_1, i) != []:
sel_3 = r'^<div class="chapter_title">第(\d+)章 (.*?)</div>$'
j = int(re.sub(sel_3, r'\1', i))
k = re.sub(sel_3, r'\2', i)
q3.append(('chapter_title', j, k))
else:
sel_3 = r'^<dd class="subtitle">\n<a href="/%s/(\d+)/">(.*?)</a>\n</dd>$' % web_id
k = int(re.sub(sel_3, r'\1', i))
l = re.sub(r'^[##].*? (.*?)$', r'\1', re.sub(sel_3, r'\2', i))
q3.append(('subtitle', k, l))
return q3
def get_chapter(web_id, chap_id):
q1 = map_num(get_webpage('http://ncode.syosetu.com/%s/%d/' % (web_id, chap_id)))
q2 = bs4.BeautifulSoup(q1, 'html5lib')
q3 = q2.find_all(id='novel_honbun')[0].text
# stylize paragraphs
q3 = re.sub(r'\n +', r'\n', q3)
q3 = re.sub(r'\n\n+', r'\n\n', q3)
q3 = re.sub(r'(^\n+|\n+$)', r'', q3)
# split into lines
q4 = q3.split('\n')
q5 = []
for i in q4:
if re.findall(r'^ *$', i) != []:
q5.append(('break',))
else:
q5.append(('line', [('regular', i.replace(' ', ''))]))
return q5
class SyosetuDatabase:
def __init__(self, filename, syosetu_id, force_clear=False):
found = os.path.exists(filename)
self.base = sqlite3.connect(filename)
self.cur = self.base.cursor()
self.sid = syosetu_id
if not found or force_clear:
self.cur.execute("DROP TABLE IF EXISTS toc;")
self.cur.execute("DROP TABLE IF EXISTS cont;")
self.cur.execute("""
CREATE TABLE toc (
e_type TEXT,
e_id INTEGER,
e_title TEXT
);""");
self.cur.execute("""
CREATE TABLE cont (
t_idx INTEGER,
t_jpn JSONB,
t_jpn_lit JSONB
);""");
return
def get_contents(self):
q1 = []
for i in self.cur.execute("SELECT * FROM toc;"):
q1.append((i[0], i[1], i[2]))
return q1
def get_chapter_title(self, typ, num):
for i in self.get_contents():
if i[0] == typ and i[1] == num:
return i
return (typ, num, '無題')
def get_contents_chapters_id(self):
q1 = []
for i in self.get_contents():
if i[0] == 'subtitle':
q1.append(i[1])
return sorted(list(set(q1)))
def update_contents(self):
toc = get_chapter_list(self.sid)
self.cur.execute("DELETE FROM toc;")
for i in toc:
self.cur.execute("INSERT INTO toc (e_type, e_id, e_title) VALUES (?, ?, ?)",
(i[0], i[1], i[2]))
return
def get_chapter(self, chap_id):
q1 = []
for i in self.cur.execute("SELECT * FROM cont WHERE t_idx = ?", (chap_id,)):
q1.append(i)
if q1 == []:
return []
q = [[], []]
for num in range(0, 2):
for i in json.loads(q1[0][num + 1]):
if i[0] == 'line':
q[num].append(('line', list(tuple(i) for i in i[1])))
else:
q[num].append(('break',))
return q[0], q[1]
def has_chapter(self, chap_id):
q1 = []
for i in self.cur.execute("SELECT * FROM cont WHERE t_idx = ?", (chap_id,)):
q1.append(i)
return q1 != []
def update_chapter(self, chap_id, phonogram_renderer=None):
chap1 = get_chapter(self.sid, chap_id)
cj1 = json.dumps(chap1)
chap2 = renderer.phoneticize(chap1, phonogram_renderer=phonogram_renderer)
cj2 = json.dumps(chap2)
self.cur.execute("DELETE FROM cont WHERE t_idx = ?;", (chap_id,))
self.cur.execute("INSERT INTO cont (t_idx, t_jpn, t_jpn_lit) VALUES (?, ?, ?)",
(chap_id, cj1, cj2))
return
def update_all(self, phonogram_renderer=None, display_progress_bar=False):
self.update_contents()
self.commit()
ch = self.get_contents_chapters_id()
for i in ch:
if not self.has_chapter(i):
self.update_chapter(i, phonogram_renderer=phonogram_renderer)
self.commit()
if display_progress_bar:
print('%s|%s\r' % (str(i).rjust(4), ('=' * int(i / len(ch) * 70)).ljust(70, '.')), end='')
return
def commit(self):
self.base.commit()
return
def close(self):
self.commit()
self.base.close()
return
pass
```
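A minimal usage sketch for the scraper above, assuming network access to ncode.syosetu.com and the renderer set up as shown earlier; `'n0000aa'` is a placeholder novel id.
```python
from yumina import renderer, syosetu

db = syosetu.SyosetuDatabase('novel.db', 'n0000aa')  # placeholder novel id
db.update_all(phonogram_renderer=renderer.get_phonogram_renderer(),
              display_progress_bar=True)
plain, annotated = db.get_chapter(1)  # raw and phoneticized renderings
db.close()
```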
|