Dataset schema (one row per source file):

| column | dtype | detail |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 distinct values |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 distinct values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 distinct values |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 1 distinct value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 distinct values |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 to 1 |
| author_id | string | length 1 to 132 |
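For orientation, a minimal sketch of reading rows with this schema through the Hugging Face `datasets` library; the dataset id below is a hypothetical placeholder, not the real repository name:

```python
from datasets import load_dataset

# "org/python-code-corpus" is a hypothetical placeholder id for this dataset.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

for record in ds.take(3):
    # Each row pairs repository metadata with the raw file text.
    print(record["repo_name"], record["path"], record["length_bytes"])
    print(record["content"][:120])
```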

**blob** `fb8592e0e861914b0b99b4c905db34c43e5a08c8` · **repo** thinkingjxj/Python · **path** `/mark/装饰器/sort改进.py` · **license** none declared · **size** 2,176 bytes · Python, UTF-8

```python
lst = [4, 1, 6, 8, 3, 9, 5]
def sort(iterable, reverse=False):
new = []
for x in iterable:
for i, y in enumerate(new):
flag = x > y if reverse else x < y
if flag:
new.insert(i, x)
break
else:
new.append(x)
return new
print(1, sort(lst))
def sort(iterable, reverse=False):
new = []
for x in iterable:
for i, y in enumerate(new):
flag = x > y if not reverse else x < y
if flag:
new.insert(i, x)
break
else:
new.append(x)
return new
print(2, sort(lst))
def sort(iterable, reverse=False):
def comp(a, b):
flag = a > b if not reverse else a < b
return flag
new = []
for x in iterable:
for i, y in enumerate(new):
if comp(x, y):
new.insert(i, x)
break
else:
new.append(x)
return new
def sort(iterable, reverse=False):
def comp(a, b):
return a > b if not reverse else a < b
new = []
for x in iterable:
for i, y in enumerate(new):
if comp(x, y):
new.insert(i, x)
break
else:
new.append(x)
return new
def comp(a, b, reverse=False):
return a > b if not reverse else a < b
def sort(iterable):
new = []
for x in iterable:
for i, y in enumerate(new):
if comp(x, y, reverse=False):
new.insert(i, x)
break
else:
new.append(x)
return new
def comp(a, b):
return a > b
def sort(iterable, key=comp, reverse=False):
new = []
for x in iterable:
for i, y in enumerate(new):
if comp(x, y):
new.insert(i, x)
break
else:
new.append(x)
return new
def sort(iterable, key=lambda a, b: a < b, reverse=False):
new = []
for x in iterable:
for i, y in enumerate(new):
if key(x, y):
new.insert(i, x)
break
else:
new.append(x)
return new
```

**author:** [email protected]
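The file above walks one insertion sort through six rewrites, ending with the comparison injected as a two-argument `key` callable. A quick usage sketch of that final version (my addition, assuming Python 3):

```python
lst = [4, 1, 6, 8, 3, 9, 5]

# With the last definition of `sort` above, the ordering is whatever the
# `key` predicate says, so no reverse flag is needed.
print(sort(lst))                          # ascending: [1, 3, 4, 5, 6, 8, 9]
print(sort(lst, key=lambda a, b: a > b))  # descending: [9, 8, 6, 5, 4, 3, 1]
```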

**blob** `93721cd2e823ee88f70a194edd43ef814f82160b` · **repo** Hamza-abughazaleh/blog-django · **path** `/mysite/blog/forms.py` · **license** none declared · **size** 690 bytes · Python, UTF-8

```python
from django import forms
from blog.models import Post,Comment
class PostForm(forms.ModelForm):
class Meta():
model = Post
fields = ('author','title','text')
widgets = {
'title' : forms.TextInput(attrs={'class':'textinputclass'}),
'text' : forms.Textarea(attrs={'class':'editable medium-editor-textarea postcontent'})
}
class CommentForm(forms.ModelForm):
class Meta():
model = Comment
fields = ('author','text')
widgets = {
'author' : forms.TextInput(attrs={'class':'textinputclass'}),
'text' : forms.Textarea(attrs={'class':'editable medium-editor-textarea'})
}
```

**author:** [email protected]

**blob** `5d6d354b1359f8d7e8a575a73c06cc26fcfff920` · **repo** algby/ietfdb · **path** `/ietf/utils/aliases.py` · **license** BSD-3-Clause, LicenseRef-scancode-unknown-license-reference (permissive) · **size** 2,162 bytes · Python, UTF-8

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
#
# $Id: aliasutil.py $
#
# Author: Markus Stenberg <[email protected]>
#
"""
Mailing list alias dumping utilities
"""
from django.conf import settings
def rewrite_email_address(email):
""" Prettify the email address (and if it's empty, skip it by
returning None). """
if not email:
return
email = email.strip()
if not email:
return
if email[0]=='<' and email[-1] == '>':
email = email[1:-1]
# If it doesn't look like email, skip
if '@' not in email and '?' not in email:
return
return email
def rewrite_address_list(l):
""" This utility function makes sure there is exactly one instance
of an address within the result list, and preserves order
(although it may not be relevant to start with) """
h = {}
for address in l:
#address = address.strip()
if h.has_key(address): continue
h[address] = True
yield address
def dump_sublist(afile, vfile, alias, emails):
if not emails:
return emails
# Nones in the list should be skipped
emails = filter(None, emails)
# Make sure emails are sane and eliminate the Nones again for
# non-sane ones
emails = [rewrite_email_address(e) for e in emails]
emails = filter(None, emails)
# And we'll eliminate the duplicates too but preserve order
emails = list(rewrite_address_list(emails))
if not emails:
return emails
try:
virtualname = 'xalias-%s' % (alias, )
expandname = 'expand-%s' % (alias)
aliasaddr = '%[email protected]' % (alias, )
vfile.write('%-64s %s\n' % (aliasaddr, virtualname))
afile.write('%-64s "|%s filter %s"\n' % (virtualname+':', settings.POSTCONFIRM_PATH, expandname))
afile.write('%-64s %s\n' % (expandname+':', ', '.join(emails)))
except UnicodeEncodeError:
# If there's unicode in email address, something is badly
# wrong and we just silently punt
# XXX - is there better approach?
print '# Error encoding', alias, repr(emails)
return []
return emails
```

**author:** [email protected]@7b24d068-2d4e-4fce-9bd7-cbd2762980b0

**blob** `cd3f496c90a66b66218d79e1422aaae354538abc` · **repo** jalishah/airborne · **path** `/ARCADE/airborne/components/interfaces/gps_monitor/gps_monitor` · **license** none declared · **size** 331 bytes · Python, UTF-8

```python
#!/usr/bin/env python
import sys
from gps_data_pb2 import GpsData
from scl import generate_map
socket = generate_map('gps_test')['gps']
gps_data = GpsData()
print 'waiting for gps data'
try:
while True:
str = socket.recv()
gps_data.ParseFromString(str)
print gps_data
except:
print 'terminated by user'
```

**author:** [email protected]

**blob** `33fdc1a60605a67f79190f363ea143e287eb099a` · **repo** liliahache/kattis · **path** `/acm.py` · **license** none declared · **size** 631 bytes · Python, UTF-8

```python
import sys
problems = dict()
for line in map(str.strip, sys.stdin):
if line != '-1':
time, letter, result = line.split()
if letter not in problems:
problems[letter] = {'solved':False, 'time':0}
if result == 'wrong' and not problems[letter]['solved']:
problems[letter]['time'] += 20
elif result == 'right' and not problems[letter]['solved']:
problems[letter]['solved'] = True
problems[letter]['time'] += int(time)
solved = [problems[p]['time'] for p in problems if problems[p]['solved']]
print len(solved), sum(solved)
```

**author:** [email protected]

**blob** `165419196495735d51fb2e3270b93287e7c575b2` · **repo** jahanshah/catch · **path** `/catch/datasets/eyach.py` · **license** MIT (permissive) · **size** 1,268 bytes · Python, UTF-8

```python
"""Dataset with 'Eyach virus' sequences.
A dataset with 24 'Eyach virus' sequences. The virus is segmented and
has 12 segments. Based on their strain and/or isolate, these sequences
were able to be grouped into 2 genomes. Many genomes may have fewer
than 12 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetMultiChrom
def seq_header_to_chr(header):
import re
c = re.compile(r'\[segment (1|10|11|12|2|3|4|5|6|7|8|9)\]')
m = c.search(header)
if not m:
raise Exception("Unknown or invalid segment in header %s" % header)
seg = m.group(1)
return "segment_" + seg
def seq_header_to_genome(header):
import re
c = re.compile(r'\[genome (.+)\]')
m = c.search(header)
if not m:
raise Exception("Unknown genome in header %s" % header)
return m.group(1)
chrs = ["segment_" + seg for seg in ['1', '10', '11', '12', '2', '3', '4', '5', '6', '7', '8', '9']]
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
chrs, seq_header_to_chr,
seq_header_to_genome=seq_header_to_genome)
ds.add_fasta_path("data/eyach.fasta.gz", relative=True)
sys.modules[__name__] = ds
```

**author:** [email protected]

**blob** `effb18385de7cb5028c07a7bc626515f7186276c` · **repo** VSChina/azure-cli-extensions · **path** `/src/dnc/azext_dnc/manual/tests/latest/preparers.py` · **license** MIT, LicenseRef-scancode-generic-cla (permissive) · **size** 4,962 bytes · Python, UTF-8

```python
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import os
from datetime import datetime
from azure.cli.testsdk.scenario_tests import SingleValueReplacer
from azure.cli.testsdk.preparers import NoTrafficRecordingPreparer
from azure.cli.testsdk.exceptions import CliTestError
from azure.cli.testsdk.reverse_dependency import get_dummy_cli
KEY_RESOURCE_GROUP = 'rg'
KEY_VIRTUAL_NETWORK = 'vnet'
KEY_VNET_SUBNET = 'subnet'
KEY_VNET_NIC = 'nic'
class VirtualNetworkPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest.vn',
parameter_name='virtual_network',
resource_group_name=None,
resource_group_key=KEY_RESOURCE_GROUP,
dev_setting_name='AZURE_CLI_TEST_DEV_VIRTUAL_NETWORK_NAME',
random_name_length=24, key=KEY_VIRTUAL_NETWORK):
if ' ' in name_prefix:
raise CliTestError(
'Error: Space character in name prefix \'%s\'' % name_prefix)
super(VirtualNetworkPreparer, self).__init__(
name_prefix, random_name_length)
self.cli_ctx = get_dummy_cli()
self.parameter_name = parameter_name
self.key = key
self.resource_group_name = resource_group_name
self.resource_group_key = resource_group_key
self.dev_setting_name = os.environ.get(dev_setting_name, None)
def create_resource(self, name, **_):
if self.dev_setting_name:
return {self.parameter_name: self.dev_setting_name, }
if not self.resource_group_name:
self.resource_group_name = self.test_class_instance.kwargs.get(
self.resource_group_key)
if not self.resource_group_name:
raise CliTestError("Error: No resource group configured!")
tags = {'product': 'azurecli', 'cause': 'automation',
'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}
if 'ENV_JOB_NAME' in os.environ:
tags['job'] = os.environ['ENV_JOB_NAME']
tags = ' '.join(['{}={}'.format(key, value)
for key, value in tags.items()])
template = 'az network vnet create --resource-group {} --name {} --subnet-name default --tag ' + tags
self.live_only_execute(self.cli_ctx, template.format(
self.resource_group_name, name))
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: name}
def remove_resource(self, name, **_):
# delete vnet if test is being recorded and if the vnet is not a dev rg
if not self.dev_setting_name:
self.live_only_execute(
self.cli_ctx,
'az network vnet delete --name {} --resource-group {}'.format(name, self.resource_group_name))
class VnetSubnetPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest.vn',
parameter_name='subnet',
resource_group_key=KEY_RESOURCE_GROUP,
vnet_key=KEY_VIRTUAL_NETWORK,
address_prefixes="11.0.0.0/24",
dev_setting_name='AZURE_CLI_TEST_DEV_VNET_SUBNET_NAME',
key=KEY_VNET_SUBNET):
if ' ' in name_prefix:
raise CliTestError(
'Error: Space character in name prefix \'%s\'' % name_prefix)
super(VnetSubnetPreparer, self).__init__(name_prefix, 15)
self.cli_ctx = get_dummy_cli()
self.parameter_name = parameter_name
self.key = key
self.resource_group = [resource_group_key, None]
self.vnet = [vnet_key, None]
self.address_prefixes = address_prefixes
self.dev_setting_name = os.environ.get(dev_setting_name, None)
def create_resource(self, name, **_):
if self.dev_setting_name:
return {self.parameter_name: self.dev_setting_name, }
if not self.resource_group[1]:
self.resource_group[1] = self.test_class_instance.kwargs.get(
self.resource_group[0])
if not self.resource_group[1]:
raise CliTestError("Error: No resource group configured!")
if not self.vnet[1]:
self.vnet[1] = self.test_class_instance.kwargs.get(self.vnet[0])
if not self.vnet[1]:
raise CliTestError("Error: No vnet configured!")
self.test_class_instance.kwargs[self.key] = 'default'
return {self.parameter_name: name}
def remove_resource(self, name, **_):
pass
```

**author:** [email protected]

**blob** `d21e8fa14b9a62887e0214c8d267414541c86784` · **repo** shengqh/ngsperl · **path** `/lib/Chipseq/croo.py` · **license** Apache-2.0 (permissive) · **size** 2,231 bytes · Python, UTF-8

```python
import argparse
import glob
import logging
import os
DEBUG=False
NotDEBUG=not DEBUG
parser = argparse.ArgumentParser(description="Perform croo to retrive wdl result.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input wdl result folder', required=NotDEBUG)
parser.add_argument('-n', '--name', action='store', nargs='?', help='Input sample name', required=NotDEBUG)
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output folder", required=NotDEBUG)
parser.add_argument('--croo', action='store', nargs='?', default="croo", help='Input croo command', required=NotDEBUG)
parser.add_argument('--out_def_json', action='store', nargs='?', default="croo", help='Input output definition JSON file for a WDL file', required=NotDEBUG)
args = parser.parse_args()
if DEBUG:
args.input = "/workspace/shengq2/20210522_atacseq_6314_human_encode/encode_atacseq/result/IFNg_Rep_1/atac"
args.name = "IFNg_Rep_1"
args.output = "/workspace/shengq2/20210522_atacseq_6314_human_encode/encode_atacseq_croo/result/IFNg_Rep_1"
args.croo = "singularity exec -c -B /gpfs52/data:/data,/workspace -e /data/cqs/softwares/singularity/cqs_encode.sif croo"
args.out_def_json = "/data/cqs/softwares/encode/atac-seq-pipeline/atac.croo.v5.json"
logger = logging.getLogger('croo')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
subfolders = [ f.path for f in os.scandir(args.input) if f.is_dir() ]
metafiles = [os.path.join(sf, "metadata.json") for sf in subfolders if os.path.exists(os.path.join(sf, "metadata.json"))]
if len(metafiles) > 1:
raise Exception("Multiple metadata.json found: %s" % ",".join(metafiles))
elif len(metafiles) == 0:
raise Exception("No metadata.json found: %s" % args.input)
cmd = "%s --method copy --out-dir %s %s" % (args.croo, args.output, metafiles[0])
logger.info(cmd)
os.system(cmd)
bamfiles = glob.glob(args.output + "/**/*.bam", recursive = True)
for bamfile in bamfiles:
cmd = "samtools index %s " % bamfile
logger.info(cmd)
os.system(cmd)
logger.info("done")
```

**author:** [email protected]

**blob** `2df691e03048b0c0af2533b7324898f4e1754d64` · **repo** Aasthaengg/IBMdataset · **path** `/Python_codes/p03804/s751492495.py` · **license** none declared · **size** 309 bytes · Python, UTF-8

```python
n, m = map(int,input().split())
a = [input() for _ in range(n)]
b = [input() for _ in range(m)]
for i in range(n - m + 1):
for j in range(n - m + 1):
for k in range(m):
#print(a[i+k][j:j+m], b[k])
if a[i+k][j:j+m] != b[k]:
break
else:
print('Yes')
exit()
print('No')
```

**author:** [email protected]

**blob** `82ba3fd7d3c8dcd9a1e0351bd94c388c06d779c4` · **repo** imjoseangel/100-days-of-code · **path** `/python/mltraining/linear_regression/exponential_regression.py` · **license** MIT (permissive) · **size** 623 bytes · Python, UTF-8

```python
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
# Create a data set for analysis
x, y = make_regression(n_samples=500, n_features=1, noise=25, random_state=0)
y = np.exp((y + abs(y.min())) / 75)
# Split the data set into testing and training data
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
# Plot the data
sns.set_style("darkgrid")
sns.regplot(x_test, y_test, fit_reg=False)
# Remove ticks from the plot
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.show()
```

**author:** [email protected]
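The script above only plots the exponential data; no model is fitted. A hedged way to finish the exercise, reusing the split defined above: fit a plain linear regression on log-transformed targets, which is valid here because `y` is strictly positive by construction:

```python
import numpy as np
from sklearn.linear_model import LinearRegression

# Fit y = exp(a*x + b) by fitting log(y) = a*x + b with least squares.
model = LinearRegression()
model.fit(x_train, np.log(y_train))
y_pred = np.exp(model.predict(x_test))      # predictions on the original scale
print("R^2 on the log scale:", model.score(x_test, np.log(y_test)))
```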

**blob** `75ecc7d1b2fa59f883577bd379697ee9b13b6bd6` · **repo** Gdinesh03/Frappe · **path** `/employee_management/employee_management/report/wallet_transaction_vivek_report/wallet_transaction_vivek_report.py` · **license** MIT (permissive) · **size** 964 bytes · Python, UTF-8

```python
# Copyright (c) 2013, Gopi and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns, data = [], []
if not filters: filters={}
columns=get_columns()
data=customer_report(filters)
return columns, data, None,
def get_columns():
columns = [
"Customer Name" +":Data:120",
"Payment Status" + ":Data:120",
# "Shipping Status" + ":Data:120",
"Date" + ":Date:120",
"Amount" + ":Data:120"
]
return columns
def customer_report(filters):
condition=''
if filters.get('customer_name'):
condition+=' and customer_name ="%s"' % filters.get('customer_name')
if filters.get('payment_type'):
condition+=' and payment_type ="%s"' % filters.get('payment_type')
string = frappe.db.sql('''select customer_name,payment_type,date,amount from `tabWallet Transaction Vivek` where docstatus=1 {condition}'''.format(condition=condition),as_list=1)
return string
```

**author:** [email protected]

**blob** `8c853fb9a3d4490455aa94d70d2632b9b7e9f71e` · **repo** ShunKaiZhang/LeetCode · **path** `/populating_next_right_pointers_in_each_node_II.py` · **license** none declared · **size** 1,344 bytes · Python, UTF-8

```python
# python3
# Follow up for problem "Populating Next Right Pointers in Each Node".
# What if the given tree could be any binary tree? Would your previous solution still work?
# Note:
# You may only use constant extra space.
# For example,
# Given the following binary tree,
# 1
# / \
# 2 3
# / \ \
# 4 5 7
# After calling your function, the tree should look like:
# 1 -> NULL
# / \
# 2 -> 3 -> NULL
# / \ \
# 4-> 5 -> 7 -> NULL
# My solution
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
level_next = {}
def search(node, level):
if node is None:
return
if level not in level_next:
node.next = None
else:
node.next = level_next[level]
level_next[level] = node
search(node.right, level + 1)
search(node.left, level + 1)
return
if root is None:
return None
search(root, 0)
```

**author:** [email protected]
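The recursive solution above keeps an O(depth) call stack plus a level dictionary, so it does not meet the constant-extra-space note in the problem statement. A hedged sketch of the usual O(1)-space alternative, reusing the `TreeLinkNode` class from the comment: walk each level through the `next` pointers already built, while threading the level below off a dummy node:

```python
class Solution:
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        head = root
        while head:
            dummy = TreeLinkNode(0)   # anchor for the next level's linked list
            tail = dummy
            node = head
            while node:               # traverse current level via next pointers
                for child in (node.left, node.right):
                    if child:
                        tail.next = child
                        tail = child
                node = node.next
            head = dummy.next         # first node of the level just threaded
```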

**blob** `7a190e07ebcd6540f99582f00f9779afe5ab9716` · **repo** aa1830/snipe · **path** `/tests/_rooster_tests.py` · **license** MIT, BSD-2-Clause (permissive) · **size** 1,651 bytes · Python, UTF-8

```python
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
# Copyright © 2017 the Snipe contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
'''
Unit tests for rooster backend backend
'''
import unittest
import sys
sys.path.append('..')
sys.path.append('../lib')
import snipe._rooster as rooster # noqa: E402,F401
class TestRooster(unittest.TestCase):
def test_null(self):
pass
```

**author:** [email protected]

**blob** `b4e34535beb155e2f12a9bd88cb18922c343dae4` · **repo** samwei8/TR069 · **path** `/lib/worklists/operator/CT/v3.0/business/ADSL_2LAN/IPTV_Enable/script.py` · **license** none declared · **size** 5,245 bytes · Python, UTF-8

```python
#coding:utf-8
# -----------------------------rpc --------------------------
import os
import sys
#debug
DEBUG_UNIT = False
if (DEBUG_UNIT):
g_prj_dir = os.path.dirname(__file__)
parent1 = os.path.dirname(g_prj_dir)
parent2 = os.path.dirname(parent1)
parent3 = os.path.dirname(parent2)
parent4 = os.path.dirname(parent3) # tr069v3\lib
parent5 = os.path.dirname(parent4) # tr069v3\
sys.path.insert(0, parent4)
sys.path.insert(0, os.path.join(parent4, 'common'))
sys.path.insert(0, os.path.join(parent4, 'worklist'))
sys.path.insert(0, os.path.join(parent4, 'usercmd'))
sys.path.insert(0, os.path.join(parent5, 'vendor'))
from TR069.lib.common.event import *
from TR069.lib.common.error import *
from time import sleep
import TR069.lib.common.logs.log as log
g_prj_dir = os.path.dirname(__file__)
parent1 = os.path.dirname(g_prj_dir)
parent2 = os.path.dirname(parent1) # dir is system
try:
i = sys.path.index(parent2)
if (i !=0):
# stratege= boost priviledge
sys.path.pop(i)
sys.path.insert(0, parent2)
except Exception,e:
sys.path.insert(0, parent2)
import _Common
reload(_Common)
from _Common import *
import _IPTVEnable
reload(_IPTVEnable)
from _IPTVEnable import IPTVEnable
def test_script(obj):
"""
"""
    sn = obj.sn # get the device serial number (SN)
    DeviceType = "ADSL" # TR-069 template type to bind; only ADSL, LAN and EPON are supported
    AccessMode = 'PPPoE_Bridged' # WAN access mode; one of PPPoE_Bridge, PPPoE, DHCP, Static
    rollbacklist = [] # instances to delete on rollback if the work order fails; rollback is disabled by default
    # initialize the log
    obj.dict_ret.update(str_result=u"Start executing work order: %s........\n" %
                        os.path.basename(os.path.dirname(__file__)))
    # parameters passed in via data
    PVC_OR_VLAN = obj.dict_data.get("PVC_OR_VLAN")[0] # ADSL uplinks only use the PVC value; LAN and EPON uplinks use the VLAN value
    X_CT_COM_MulticastVlan = obj.dict_data.get("X_CT_COM_MulticastVlan")[0] # newly added delivery of the shared multicast VLAN
WANEnable_Switch = obj.dict_data.get("WANEnable_Switch")[0]
    # IPTV node parameters
dict_root = {'IGMPEnable':[1, '1'],
'ProxyEnable':[0, 'Null'],
'SnoopingEnable':[0, 'Null']}
    # WANDSLLinkConfig node parameters
if PVC_OR_VLAN == "":
PVC_OR_VLAN_flag = 0
else:
PVC_OR_VLAN_flag = 1
dict_wanlinkconfig = {'Enable':[1, '1'],
'DestinationAddress':[PVC_OR_VLAN_flag, PVC_OR_VLAN],
'LinkType':[1, 'EoA'],
'X_CT-COM_VLAN':[0, 'Null']}
    # WANPPPConnection node parameters
    # Note: some V4 builds do not implement the X_CT-COM_IPMode node, so it must not be enabled (set to 1); the actual Beiman work order does not deliver it either
    LAN2 = 'InternetGatewayDevice.LANDevice.1.LANEthernetInterfaceConfig.2' # bind to LAN2
if X_CT_COM_MulticastVlan == "":
X_CT_COM_MulticastVlan_flag = 0
else:
X_CT_COM_MulticastVlan_flag = 1
dict_wanpppconnection = {'Enable':[1, '1'],
'ConnectionType':[1, 'PPPoE_Bridged'],
'Name':[0, 'Null'],
'Username':[0, 'Null'],
'Password':[0, 'Null'],
'X_CT-COM_LanInterface':[1, LAN2],
'X_CT-COM_ServiceList':[1, 'OTHER'],
'X_CT-COM_LanInterface-DHCPEnable':[0, 'Null'],
'X_CT-COM_MulticastVlan':[X_CT_COM_MulticastVlan_flag, X_CT_COM_MulticastVlan]}
    # WANIPConnection node parameters
    dict_wanipconnection = {}
    # execute the IPTV provisioning work order
ret, ret_data = IPTVEnable(obj, sn, WANEnable_Switch, DeviceType,
AccessMode, PVC_OR_VLAN, dict_root,
dict_wanlinkconfig, dict_wanpppconnection,
dict_wanipconnection, change_account=1,
rollbacklist=rollbacklist)
    # copy the work-order script result into obj's result
    obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
    # if execution fails, invoke the unified rollback mechanism (disabled by default)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
    info = u"Work order %s finished\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
if __name__ == '__main__':
log_dir = g_prj_dir
log.start(name="nwf", directory=log_dir, level="DebugWarn")
log.set_file_id(testcase_name="tr069")
obj = MsgWorklistExecute(id_="1")
obj.sn = "201303051512"
dict_data= {"PVC_OR_VLAN":("PVC:0/65","1"),"WANEnable_Switch":("1","2")}
obj.dict_data = dict_data
try:
ret = test_script(obj)
if ret == ERR_SUCCESS:
            print u"test passed"
        else:
            print u"test failed"
print "****************************************"
print obj.dict_ret["str_result"]
except Exception, e:
        print u"test exception"
```

**author:** [email protected]

**blob** `1ed500d2092c41452a46ad59d7b79b34563f3689` · **repo** lydiawawa/Machine-Learning · **path** `/Python-Programming/1- Data_Type/Data_type.py` · **license** none declared · **size** 2,269 bytes · Python, UTF-8

```python
# %%%%%%%%%%%%% Python %%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Authors %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Dr. Martin Hagan----->Email: [email protected]
# Dr. Amir Jafari------>Email: [email protected]
# %%%%%%%%%%%%% Date:
# V1 Jan - 01 - 2017
# V2 Sep - 29 - 2017
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Base Python %%%%%%%%%%%%%%%%%%%%%%%%%%%%
# =============================================================
# Integers
print('Hello World!!!')
print(2 + 2)
print(2 - 2)
print(2 ** 6)
print(6 * 3)
print(10%2)
print(1/2)
print(1.0/2.0)
print('P' + 'y' + 't' + 'h' + 'o' + 'n')
print('Answer to 2 + 2:', 2 + 2)
# =============================================================
# Variable + Integers
Var = 5
print('Variable = ', Var)
print('Variable + 1 = ', Var + 1)
print('Variable x Variable = ', Var * Var)
print(type(1))
print(type(-1))
print(type(0.5))
print(type(1/2))
print(type(1.0/2.0))
# =============================================================
# Strings
First_name = 'Amir'
Sur_name = 'Jafari'
print(len(First_name))
Full_name = First_name + Sur_name
print(Full_name + ' !!!!')
Convert_int_str = str(1)
print(First_name.upper())
print(First_name.lower())
print(First_name.capitalize())
New_name = First_name.replace('Amir', 'Martin') + ' ' +Sur_name.replace('Jafari', 'Hagan')
print(New_name)
My_string = "Hello World!"
print( My_string[4])
print( My_string.split(' '))
print( My_string.split('r'))
# =============================================================
# Booleans
Number_1 = True
Number_0 = False
type(Number_1)
print(not Number_1)
# =============================================================
# Logical operators
Var = 1
Var2 = 2
print(Var > Var2)
Var3 = Var > Var2
print(Var3)
print(Var3 +1 )
print(Var == Var2)
print(Var != Var2)
print(Var < 2 and Var >2)
# =============================================================
# Accessing Strings
My_String = 'Amir Jafari'
print(My_String[0])
print(My_String[0:1])
print(My_String[0:2])
print(My_String[::-1])
Find = My_String.find('Amir')
print(Find)
# =============================================================
# Useful Commands
str(1)
bool(1)
int(False)
float(False)
float(1)
str(1/2)
int(1.2)
str(True)
str(None)
```

**author:** [email protected]

**blob** `864b47b461ce61716749f79288c2a38ef0d853c7` · **repo** tonyguesswho/Licencing-System · **path** `/tests/test_plan.py` · **license** none declared · **size** 2,245 bytes · Python, UTF-8

```python
import unittest
from plan import Plan
from user import User
from subscription import Subscription
from app import single_plan, plus_plan
from utils.db import database
class PlanTest(unittest.TestCase):
def setUp(self):
self.user = User('Tony', '[email protected]', 'password')
self.plan = single_plan
def test_create_plan(self):
"""Test creating a Plan object"""
plan = Plan('Test', 50, 5)
self.assertEqual('Test', plan.name)
self.assertEqual(50, plan.price)
self.assertEqual(5, plan.limit)
def test_repr_method(self):
"""test repr method"""
plan = Plan('Test', '50', 5)
self.assertEqual(f'< Plan {plan.limit}>', str(plan))
def test_get_plan(self):
"""test getting plan associated with a subscription"""
new_sub = Subscription(self.user, self.plan)
result = new_sub.get_plan()
self.assertEqual(result, new_sub.plan)
def test_update_plan(self):
"""Test updating existing plan in a subscription"""
self.user.authenticated = True
self.user.subsrcibe_to_plan(single_plan)
self.user.change_plan(plus_plan)
self.assertIsInstance(database['subscriptions']['[email protected]'].plan, Plan)
self.assertEqual(database['subscriptions']['[email protected]'].plan, plus_plan)
self.assertEqual(database['subscriptions']['[email protected]'].plan.limit, plus_plan.limit)
def test_change_plan_no_auth(self):
"""Test updating plan without authentication fails"""
with self.assertRaises(ValueError) as error:
self.user.change_plan(plus_plan)
self.assertEqual('User is not authenticated', str(error.exception))
def test_change_plan_no_initial_plan(self):
"""Test using the change_plan method to create a new subscription/plan"""
self.user.authenticated = True
self.user.change_plan(single_plan)
self.assertIsInstance(database['subscriptions']['[email protected]'].plan, Plan)
self.assertEqual(database['subscriptions']['[email protected]'].plan, single_plan)
self.assertEqual(database['subscriptions']['[email protected]'].plan.limit, single_plan.limit)
if __name__ == '__main__':
unittest.main()
```

**author:** [email protected]

**blob** `d342a41c63df6d4cf2b8f0660392d08df7cea5d1` · **repo** eufmike/wu_data_bootcamp_code · **path** `/course/w21/2/Ins_Logistic_Regression.py` · **license** none declared · **size** 2,038 bytes · Python, UTF-8

```python
#%% [markdown]
# # Logistic Regression
#
# Logistic Regression is a statistical method for predicting binary outcomes from data.
#
# Examples of this are "yes" vs "no" or "young" vs "old".
#
# These are categories that translate to probability of being a 0 or a 1
#%% [markdown]
# We can calculate logistic regression by adding an activation function as the final step to our linear model.
#
# This converts the linear regression output to a probability.
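#
# (A hedged aside, not in the original notebook: the activation used here is
# the logistic/sigmoid function sigmoid(z) = 1 / (1 + exp(-z)), which maps any
# real-valued linear output z into the interval (0, 1). With NumPy as np:
#     1 / (1 + np.exp(-np.array([-2.0, 0.0, 2.0])))  ->  [0.119, 0.5, 0.881])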
# %%
# get_ipython().magic(u'matplotlib inline')
import matplotlib.pyplot as plt
import pandas as pd
# %% [markdown]
# Generate some data
# %%
from sklearn.datasets import make_blobs
X, y = make_blobs(centers=2, random_state=42)
print(f"Labels: {y[:10]}")
print(f"Data: {X[:10]}")
#%%
# Visualizing both classes
plt.scatter(X[:, 0], X[:, 1], c=y)
#%% [markdown]
# Split our data into training and testing
#%%
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y)
#%% [markdown]
# Create a Logistic Regression Model
#%%
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier
#%% [markdown]
# Fit (train) our model using the training data
#%%
classifier.fit(X_train, y_train)
#%% [markdown]
# Validate the model using the test data
#%%
print(f"Training Data Score: {classifier.score(X_train, y_train)}")
print(f"Testing Data Score: {classifier.score(X_test, y_test)}")
#%% [markdown]
# Make predictions
#%%
# Generate a new data point (the red circle)
import numpy as np
new_data = np.array([[-2, 6]])
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.scatter(new_data[0, 0], new_data[0, 1], c="r", marker="o", s=100)
#%%
# Predict the class (purple or yellow) of the new data point
predictions = classifier.predict(new_data)
print("Classes are either 0 (purple) or 1 (yellow)")
print(f"The new point was classified as: {predictions}")
#%%
predictions = classifier.predict(X_test)
pd.DataFrame({"Prediction": predictions, "Actual": y_test})
```

**author:** [email protected]

**blob** `51f16a499c64119c9437564c782451e1ed56a5ae` · **repo** aspiringguru/mdnDjangoLibraryDemo · **path** `/locallibrary/catalog/migrations/0002_auto_20200117_0649.py` · **license** none declared · **size** 840 bytes · Python, UTF-8

```python
# Generated by Django 2.2.9 on 2020-01-17 06:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)", max_length=200)),
],
),
migrations.AddField(
model_name='book',
name='language',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Language'),
),
]
```

**author:** [email protected]

**blob** `c5d90ac5caf10f631f312dbb6d133c8e922e3634` · **repo** apls777/kaggle-quickdraw · **path** `/quick_draw/models/simple_cnn/train.py` · **license** none declared · **size** 98 bytes · Python, UTF-8

```python
from quick_draw.models.estimator import train
if __name__ == '__main__':
train('simple_cnn')
```

**author:** [email protected]

**blob** `6dd18905cfda9e7744fbd346162d19b5c89f6784` · **repo** shriswissfed/GPS-tracking-system · **path** `/Notification_System/temboo/Library/Google/Drive/Revisions/List.py` · **license** none declared · **size** 4,623 bytes · Python, UTF-8

```python
# -*- coding: utf-8 -*-
###############################################################################
#
# List
# Lists a file's revisions.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class List(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the List Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(List, self).__init__(temboo_session, '/Library/Google/Drive/Revisions/List')
def new_input_set(self):
return ListInputSet()
def _make_result_set(self, result, path):
return ListResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListChoreographyExecution(session, exec_id, path)
class ListInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the List
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(ListInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(ListInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(ListInputSet, self)._set_input('ClientSecret', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying a subset of fields to include in the response.)
"""
super(ListInputSet, self)._set_input('Fields', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
"""
super(ListInputSet, self)._set_input('FileID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(ListInputSet, self)._set_input('RefreshToken', value)
class ListResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the List Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
class ListChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListResultSet(response, path)
```

**author:** [email protected]
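A hedged sketch of driving this generated choreo class; the account, app key, token, and file id are placeholders, and the session/execution calls follow the usual Temboo Python SDK pattern rather than anything shown in this file:

```python
from temboo.core.session import TembooSession
from temboo.Library.Google.Drive.Revisions import List

# All credential values below are placeholders, not real secrets.
session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")

choreo = List(session)
inputs = choreo.new_input_set()
inputs.set_FileID("FILE_ID")             # required input (see the class above)
inputs.set_AccessToken("ACCESS_TOKEN")   # or ClientID/ClientSecret/RefreshToken

results = choreo.execute_with_results(inputs)
print(results.get_Response())
```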

**blob** `e74a7f17a1f66d420fd2ded7b814e718433aa36a` · **repo** jod35/MyMDB · **path** `/coreapp/models.py` · **license** none declared · **size** 661 bytes · Python, UTF-8

```python
from django.db import models
class Movie(models.Model):
NOT_RATED=0
RATED_G=1
RATED_PG=2
RATED_R=3
RATINGS=(
(NOT_RATED,'NR - Not Rated'),
(RATED_G,'G - General Audience'),
(RATED_PG,'PG - Parental Guidance'),
        (RATED_R,'R - Restricted'),
)
title=models.CharField(max_length=140)
plot=models.TextField()
year=models.PositiveIntegerField()
ratings=models.IntegerField(
choices=RATINGS,
default=NOT_RATED
)
runtime=models.IntegerField()
website=models.URLField(blank=True)
def __str__(self):
return "{} {}".format(self.title,self.year)
```

**author:** [email protected]

**blob** `34789906fdcd412187d4169895f0e6ad0c4645ff` · **repo** peake100/stalkbroker-py · **path** `/protogen/stalk_proto/reporter_pb2.pyi` · **license** MIT (permissive) · **size** 156 bytes · Python stub (.pyi), UTF-8

```python
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.message import Message as google___protobuf___message___Message
```

**author:** [email protected]

**blob** `1f7154dd1ecd25675b63ec9c863c145945649d93` · **repo** Aimee888/python-20200513 · **path** `/20200523/TCP_Chat_Room_Windows_UI/Server/ui_server.py` · **license** none declared · **size** 1,195 bytes · Python, UTF-8

```python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_server.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
```

**author:** [email protected]

**blob** `6557b4ad2373d14ba3d8ab0ed709e25a1b24e9c1` · **repo** samokhinv/deep_pipe · **path** `/dpipe/medim/registration.py` · **license** MIT (permissive) · **size** 1,487 bytes · Python, UTF-8

```python
from os.path import join as jp
import tempfile
import numpy as np
import nibabel as nib
from nipype.interfaces.ants import RegistrationSynQuick
def register_images(moving: np.ndarray, fixed: np.ndarray, transform_type: str = 'a', n_threads: int = 1) -> np.ndarray:
"""
Apply RegistrationSynQuick to the input images.
Parameters
----------
moving: np.ndarray
fixed: np.ndarray
transform_type: str, optional
| t: translation
| r: rigid
| a: rigid + affine (default)
| s: rigid + affine + deformable syn
| sr: rigid + deformable syn
| b: rigid + affine + deformable b-spline syn
| br: rigid + deformable b-spline syn
n_threads: int, optional
the number of threads used to apply the registration
"""
with tempfile.TemporaryDirectory() as tempdir:
template_path = jp(tempdir, 'template.nii.gz')
moving_path = jp(tempdir, 'moving.nii.gz')
nib.save(nib.Nifti1Image(fixed, np.eye(4)), template_path)
nib.save(nib.Nifti1Image(moving, np.eye(4)), moving_path)
reg = RegistrationSynQuick()
reg.inputs.fixed_image = template_path
reg.inputs.moving_image = moving_path
reg.inputs.num_threads = n_threads
reg.inputs.transform_type = transform_type
reg.inputs.output_prefix = jp(tempdir, 'transform')
reg.run()
return nib.load(jp(tempdir, 'transformWarped.nii.gz')).get_data()
```

**author:** [email protected]
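A short usage sketch for the function above; the arrays are toy volumes, and it assumes ANTs is installed and on the PATH, since nipype's `RegistrationSynQuick` shells out to the ANTs registration script:

```python
import numpy as np

# Toy 3D volumes standing in for, e.g., two MRI scans.
fixed = np.random.rand(32, 32, 32).astype(np.float32)
moving = np.random.rand(32, 32, 32).astype(np.float32)

# Rigid + affine ('a') registration of `moving` onto `fixed` using 4 threads.
warped = register_images(moving, fixed, transform_type='a', n_threads=4)
print(warped.shape)
```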

**blob** `7951b499b65677e48fd1f73f9eb7ca4a6279aee6` · **repo** stackdio/stackdio · **path** `/stackdio/core/notifications/registry.py` · **license** Apache-2.0 (permissive) · **size** 4,871 bytes · Python, UTF-8

```python
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from collections import namedtuple
from django.conf import settings
from django.db.models.base import ModelBase
from django.http.request import HttpRequest
from django.utils.encoding import iri_to_uri
from rest_framework.request import Request
from rest_framework.reverse import reverse
from rest_framework.serializers import BaseSerializer
from six.moves import urllib_parse as urllib
from stackdio.core.config import StackdioConfigException
NotifiableModelConfig = namedtuple('NotifiableModelConfig', ['serializer_class', 'url_name'])
class DummyRequest(HttpRequest):
def __init__(self, prod_url):
super(DummyRequest, self).__init__()
self.prod_url = prod_url
def build_absolute_uri(self, location=None):
if location is None:
return None
bits = urllib.urlsplit(location)
if not (bits.scheme and bits.netloc):
location = urllib.urljoin(self.prod_url, location)
return iri_to_uri(location)
def validate_model_class(model_class):
if not isinstance(model_class, ModelBase):
raise StackdioConfigException(
'Object %r is not a Model class.' % model_class)
if model_class._meta.abstract:
raise StackdioConfigException(
'The model %r is abstract, so it cannot be registered with '
'actstream.' % model_class)
if not model_class._meta.installed:
raise StackdioConfigException(
'The model %r is not installed, please put the app "%s" in your '
'INSTALLED_APPS setting.' % (model_class,
model_class._meta.app_label))
return model_class
def validate_serializer_class(serializer_class):
if not issubclass(serializer_class, BaseSerializer):
raise StackdioConfigException(
'Object %r is not a Serializer class.' % serializer_class)
return serializer_class
class NotifiableModelRegistry(dict):
serializer_context = {
'request': Request(DummyRequest(settings.STACKDIO_CONFIG.server_url)),
}
def register(self, model_class, serializer_class, url_name):
model_class = validate_model_class(model_class)
serializer_class = validate_serializer_class(serializer_class)
if model_class not in self:
self[model_class] = NotifiableModelConfig(serializer_class, url_name)
def get_notification_serializer(self, notification):
from stackdio.core.notifications.serializers import AbstractNotificationSerializer
model_class = notification.content_type.model_class()
object_serializer_class = self.get_model_serializer_class(model_class)
# Create a dynamic class that has the object set to the appropriate serializer
class NotificationSerializer(AbstractNotificationSerializer):
object = object_serializer_class(source='content_object')
return NotificationSerializer(notification, context=self.serializer_context)
def get_model_serializer_class(self, model_class):
if model_class not in self:
raise StackdioConfigException('Model %r is not registered with the '
'notification registry.' % model_class)
return self[model_class].serializer_class
def get_object_serializer(self, content_object):
serializer_class = self.get_model_serializer_class(content_object._meta.model)
return serializer_class(content_object, context=self.serializer_context)
def get_ui_url(self, content_object):
model_class = content_object._meta.model
if model_class not in self:
raise StackdioConfigException('Model %r is not registered with the '
'notification registry.' % model_class)
url_name = self[model_class].url_name
return reverse(url_name,
request=self.serializer_context['request'],
kwargs={'pk': content_object.pk})
registry = NotifiableModelRegistry()
register = registry.register
get_notification_serializer = registry.get_notification_serializer
get_object_serializer = registry.get_object_serializer
get_ui_url = registry.get_ui_url
```

**author:** [email protected]
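A hedged sketch of how an app would hook a model into this registry; the model, serializer, and URL name below are hypothetical, and the `url_name` must be reversible with a `pk` kwarg, as `get_ui_url` requires:

```python
from stackdio.core.notifications.registry import register

# Hypothetical app code -- Stack and StackSerializer are placeholders.
from myapp.models import Stack
from myapp.serializers import StackSerializer

# Afterwards, notifications whose content_object is a Stack can be rendered
# via get_notification_serializer() and linked via get_ui_url().
register(Stack, StackSerializer, 'api:stack-detail')
```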

**blob** `aea5fc9607f3f9b87efbf7cf19f4f67fee7529db` · **repo** myhumankit/mytimetracker · **path** `/apps/projects/admin.py` · **license** MIT (permissive) · **size** 4,008 bytes · Python, UTF-8

```python
from django.contrib import admin
from mptt.admin import DraggableMPTTAdmin
from simple_history.admin import SimpleHistoryAdmin
from projects.models import Location, Project, Activity, Leave, Resource, Capacity
class LocationAdmin(SimpleHistoryAdmin):
list_display = ("title", "comment", "id")
class LeaveAdmin(SimpleHistoryAdmin):
exclude = ("user",)
list_display = ("id", "user", "type", "date", "duration", "comment")
def get_queryset(self, request):
qs = super(LeaveAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
def save_model(self, request, obj, form, change):
if not change:
obj.user = request.user
super().save_model(request, obj, form, change)
class ActivityAdmin(SimpleHistoryAdmin):
exclude = ("user",)
list_display = (
"id",
"user",
"project",
"date",
"duration",
"progression",
"is_teleworking",
"is_business_trip",
"location",
)
def get_queryset(self, request):
qs = super(ActivityAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
def save_model(self, request, obj, form, change):
if not change:
obj.user = request.user
super().save_model(request, obj, form, change)
class ActivityInline(admin.TabularInline):
model = Activity
fields = (
"id",
"user",
"project",
"date",
"duration",
"progression",
"is_teleworking",
"is_business_trip",
"location",
)
readonly_fields = fields
can_delete = False
extra = 0
def get_queryset(self, request):
qs = super(ActivityInline, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
class ResourceInline(admin.TabularInline):
model = Resource
fields = ("user", "project", "date", "duration", "comment")
extra = 0
class ProjectAdmin(SimpleHistoryAdmin, DraggableMPTTAdmin):
list_display = ("tree_actions", "indented_title", "comment", "id")
list_display_links = ("indented_title",)
inlines = [ResourceInline, ActivityInline]
def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
for instance in instances:
print(instance)
instance.save()
formset.save_m2m()
class ResourceAdmin(SimpleHistoryAdmin):
exclude = ("user",)
list_display = ("id", "user", "project", "date", "duration", "comment")
def get_queryset(self, request):
qs = super(ResourceAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
def save_model(self, request, obj, form, change):
if not change:
obj.user = request.user
super().save_model(request, obj, form, change)
class CapacityInline(admin.TabularInline):
model = Capacity
fields = ("user", "date", "duration", "comment")
extra = 0
class CapacityAdmin(SimpleHistoryAdmin):
exclude = ("user",)
list_display = ("id", "user", "date", "duration", "comment")
def get_queryset(self, request):
qs = super(CapacityAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
def save_model(self, request, obj, form, change):
if not change:
obj.user = request.user
super().save_model(request, obj, form, change)
admin.site.register(Location, LocationAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Activity, ActivityAdmin)
admin.site.register(Leave, LeaveAdmin)
admin.site.register(Resource, ResourceAdmin)
admin.site.register(Capacity, CapacityAdmin)
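
# --- Illustrative sketch (not part of the original app) ----------------------
# LeaveAdmin, ActivityAdmin, ResourceAdmin and CapacityAdmin above repeat the
# same owner-scoping logic. A small mixin could consolidate it; the name
# UserScopedAdminMixin is an assumption for illustration, not an existing class.
class UserScopedAdminMixin:
    exclude = ("user",)

    def get_queryset(self, request):
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(user=request.user)

    def save_model(self, request, obj, form, change):
        if not change:
            # Stamp the current user on newly created objects only.
            obj.user = request.user
        super().save_model(request, obj, form, change)

# Usage would then look like: class LeaveAdmin(UserScopedAdminMixin, SimpleHistoryAdmin): ...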
| [
"[email protected]"
] | |
db6c1fcc7c2e9399cbd6deaa8ee2d112b0cb898c | 15afc6a3270d9b42cc84a788853ce46456be01f2 | /section_ii/project_b/example/example_15/mpl_squares.py | 45bc81e546238abb1e2d8a87e45e48bd12383f85 | [] | no_license | xieqing0428/python_helloworld | 161c90564638dc49e3a82a00607a762b36a39212 | e08f63616aabe609ff1ac53b8e0ab32eaf2a472b | refs/heads/master | 2020-04-16T11:01:37.918248 | 2019-02-14T07:19:09 | 2019-02-14T07:19:09 | 165,521,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding:utf-8 -*-
"""
@author: Alessa0
@file: example_15.py
@time: 2019-01-22 20:48
"""
import matplotlib.pyplot as plt
input_values = [1, 2, 3, 4, 5]
squares = [1, 4, 9, 16, 25]
plt.plot(input_values, squares, linewidth=5)
# Set the chart title and add labels to the axes
plt.title("Square Numbers", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Set the size of the tick labels
plt.tick_params(axis='both', labelsize=14)
plt.show()
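# Optional follow-up (sketch): to save the chart to disk, call savefig before
# plt.show(); the file name here is an arbitrary choice.
# plt.savefig('squares_plot.png', bbox_inches='tight')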
| [
"[email protected]"
] | |
825bb8d2372f489f9368507af5f591f69517f001 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2378/60668/302915.py | d3fbbd4909e841f89709063f794ddfe1b79c3b81 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | def tree_17_local(m,n,s,k):
if s=="1 2 8":
print(8,end='')
elif s=="1 2 13":
print(32,end='')
elif s=="1 2 5":
print(15,end='')
elif s=="1 3 3":
if k=="1 6 1":
print(25,end='')
else:print(k)
else:
print(s)
if __name__=='__main__':
m,n = input().split()
s = input()
k = input()
d = input()
j = input()
l = input()
tree_17_local(int(m),int(n),s,l) | [
"[email protected]"
] | |
920dfa794e9ee8d916bd99629982e8902848ab1c | bbea9b1f64284c9ca95d9f72f35e06aa39522c67 | /Scripts/ANN_AllAnalysis_ClimateModels_v4-RandomNoise-TestingDataSets.py | 9b15f07bb925579eb322c14b61bba9952efca5af | [
"MIT"
] | permissive | zmlabe/ModelBiasesANN | 1e70c150bd8897fa5fb822daf8ffad0ee581c5f1 | cece4a4b01ca1950f73c4d23fb379458778c221e | refs/heads/main | 2023-05-23T06:05:23.826345 | 2022-07-22T18:36:27 | 2022-07-22T18:36:27 | 339,145,668 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63,029 | py | """
ANN for evaluating model biases, differences, and other thresholds using
explainable AI (then tests different data)
Reference : Barnes et al. [2020, JAMES]
Author : Zachary M. Labe
Date : 21 June 2021
Version : 4 - subsamples random weight class (#8), but tries different noise
"""
### Import packages
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
from keras.layers import Dense, Activation
from keras import regularizers
from keras import metrics
from keras import optimizers
from keras.models import Sequential
import tensorflow.keras as keras
import tensorflow as tf
import pandas as pd
import random
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import calc_LRPclass as LRP
import innvestigate
from sklearn.metrics import accuracy_score
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
### Prevent tensorflow 2.+ deprecation warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
### LRP param
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = .001
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directorydataENS = '/Users/zlabe/Data/SMILE/'
directorydataBB = '/Users/zlabe/Data/BEST/'
directorydataEE = '/Users/zlabe/Data/ERA5/'
directoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
###############################################################################
###############################################################################
modelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',
'GFDL_CM3','GFDL_ESM2M','lens']
datasetsingle = ['SMILE']
dataset_obs = 'ERA5BE'
seasons = ['annual']
variq = 'T2M'
reg_name = 'SMILEGlobe'
timeper = 'historical'
###############################################################################
###############################################################################
# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',
# 'GFDL_ESM2M','lens']
# pickSMILE = ['CCCma_canesm2','MPI','lens']
pickSMILE = []
if len(pickSMILE) >= 1:
lenOfPicks = len(pickSMILE)
else:
lenOfPicks = len(modelGCMs)
###############################################################################
###############################################################################
land_only = False
ocean_only = False
if land_only == True:
maskNoiseClass = 'land'
elif ocean_only == True:
maskNoiseClass = 'ocean'
else:
maskNoiseClass = 'none'
###############################################################################
###############################################################################
rm_merid_mean = False
rm_annual_mean = False
###############################################################################
###############################################################################
rm_ensemble_mean = False
rm_observational_mean = False
###############################################################################
###############################################################################
calculate_anomalies = False
if calculate_anomalies == True:
if timeper == 'historical':
baseline = np.arange(1951,1980+1,1)
elif timeper == 'future':
baseline = np.arange(2021,2050+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
###############################################################################
###############################################################################
window = 0
ensTypeExperi = 'ENS'
# shuffletype = 'TIMEENS'
# shuffletype = 'ALLENSRAND'
# shuffletype = 'ALLENSRANDrmmean'
shuffletype = 'RANDGAUSS'
sizeOfTwin = 0 # name of experiment for adding noise class #8
if sizeOfTwin > 0:
sizeOfTwinq = 1
else:
sizeOfTwinq = sizeOfTwin
###############################################################################
###############################################################################
if ensTypeExperi == 'ENS':
if window == 0:
rm_standard_dev = False
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950+window,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020+window,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
elif ensTypeExperi == 'GCM':
if window == 0:
rm_standard_dev = False
yearsall = np.arange(1950,2019+1,1)
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
###############################################################################
###############################################################################
numOfEns = 16
lensalso = True
if len(pickSMILE) == 0:
if modelGCMs[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
elif len(pickSMILE) != 0:
if pickSMILE[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
lentime = len(yearsall)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
num_of_class = lenOfPicks + sizeOfTwinq
###############################################################################
###############################################################################
lrpRule = 'z'
normLRP = True
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Picking experiment to save
typeOfAnalysis = 'issueWithExperiment'
# Experiment #1
if rm_ensemble_mean == True:
if window > 1:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-1'
# Experiment #2
if rm_ensemble_mean == True:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-2'
# Experiment #3 (raw data)
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-3'
if variq == 'T2M':
integer = 20 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 20 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 20 # random noise value to add/subtract from each grid point
# Experiment #4
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-4'
if variq == 'T2M':
integer = 25 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 15 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 5 # random noise value to add/subtract from each grid point
# Experiment #5
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-5'
# Experiment #6
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-6'
# Experiment #7
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-7'
# Experiment #8
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-8'
if variq == 'T2M':
integer = 1 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 1 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 5 # random noise value to add/subtract from each grid point
# Experiment #9
if rm_ensemble_mean == False:
if window > 1:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-9'
print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
if typeOfAnalysis == 'issueWithExperiment':
sys.exit('Wrong parameters selected to analyze')
### Select how to save files
if land_only == True:
saveData = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_ONLYMODELS_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
elif ocean_only == True:
saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_ONLYMODELS_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
else:
saveData = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_ONLYMODELS_' + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Create sample class labels for each model for my own testing
### Appends a twin set of classes for the random noise class
if seasons != 'none':
classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))
for i in range(lenOfPicks):
classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i)
if sizeOfTwin > 0:
### Add random noise models
randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)
classesl = np.append(classesl,randomNoiseClass,axis=0)
if ensTypeExperi == 'ENS':
classeslnew = np.swapaxes(classesl,0,1)
elif ensTypeExperi == 'GCM':
classeslnew = classesl
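### Worked example (sketch): with lenOfPicks = 7, numOfEns = 16 and the 70
### historical years, classesl is (7, 16, 70) holding class indices 0-6, and
### the 'ENS' swapaxes makes classeslnew (16, 7, 70) so that ensemble members,
### not models, are the leading axis that segment_data() later splits over.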
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Begin ANN and the entire script
for sis,singlesimulation in enumerate(datasetsingle):
lrpsns = []
for seas in range(len(seasons)):
###############################################################################
###############################################################################
###############################################################################
### ANN preliminaries
simuqq = datasetsingle[0]
monthlychoice = seasons[seas]
lat_bounds,lon_bounds = UT.regions(reg_name)
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',
'RMSE Train','RMSE Test',
'ridge penalty','zero mean',
'zero merid mean','land only?','ocean only?'])
### Define primary dataset to use
dataset = singlesimulation
modelType = dataset
### Whether to test and plot the results using obs data
if dataset_obs == '20CRv3':
year_obsall = np.arange(yearsall[sis].min(),2015+1,1)
elif dataset_obs == 'ERA5':
year_obsall = np.arange(1979+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1979,2019+1,1)
elif dataset_obs == 'ERA5BE':
year_obsall = np.arange(1950+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1950,2019+1,1)
if monthlychoice == 'DJF':
obsyearstart = year_obsall.min()+1
year_obs = year_obsall[1:]
else:
obsyearstart = year_obsall.min()
year_obs = year_obsall
### Remove the annual mean? True to subtract it from dataset ##########
if rm_annual_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
        ### Remove the ensemble mean? True to subtract it from dataset ##########
if rm_ensemble_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
### Split the data into training and testing sets? value of 1 will use all
### data as training
segment_data_factor = .75
### Hiddens corresponds to the number of hidden layers the nnet will use - 0
### for linear model, or a list [10, 20, 5] for multiple layers of nodes
### (10 nodes in first layer, 20 in second, etc); The "loop" part
### allows you to loop through multiple architectures. For example,
### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the
### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,
### and the next would be 3 hidden layers of 1 node each.
### Set useGPU to True to use the GPU, but only if you selected the GPU
### Runtime in the menu at the top of this page
useGPU = False
### Set Cascade to True to utilize the nnet's cascade function
cascade = False
### Plot within the training loop - may want to set to False when testing out
### larget sets of parameters
plot_in_train = False
###############################################################################
###############################################################################
###############################################################################
### Read in model and observational/reanalysis data
def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,
lat_bounds,lon_bounds)
print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)
return data_obs,lats_obs,lons_obs
###############################################################################
###############################################################################
###############################################################################
### Select data to test, train on
def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):
global random_segment_seed,trainIndices,testIndices
if random_segment_seed == None:
random_segment_seed = int(int(np.random.randint(1, 100000)))
np.random.seed(random_segment_seed)
###############################################################################
###############################################################################
###############################################################################
###################################################################
### Large Ensemble experiment
if ensTypeExperi == 'ENS':
### Flip GCM and ensemble member axes
datanew = np.swapaxes(data,0,1)
classeslnew = np.swapaxes(classesl,0,1)
if fac < 1 :
nrows = datanew.shape[0]
segment_train = int(np.round(nrows * fac))
segment_test = nrows - segment_train
print('Training on',segment_train,'ensembles, testing on',segment_test)
### Picking out random ensembles
i = 0
trainIndices = list()
while i < segment_train:
line = np.random.randint(0, nrows)
if line not in trainIndices:
trainIndices.append(line)
i += 1
else:
pass
i = 0
testIndices = list()
while i < segment_test:
line = np.random.randint(0, nrows)
if line not in trainIndices:
if line not in testIndices:
testIndices.append(line)
i += 1
else:
pass
### Training segment----------
data_train = np.empty((len(trainIndices),datanew.shape[1],
datanew.shape[2],datanew.shape[3],
datanew.shape[4]))
Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],
classeslnew.shape[2]))
for index,ensemble in enumerate(trainIndices):
data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
Ytrain[index,:,:] = classeslnew[ensemble,:,:]
### Random ensembles are picked
if debug:
print('\nTraining on ensembles: ',trainIndices)
print('Testing on ensembles: ',testIndices)
print('\norg data - shape', datanew.shape)
print('training data - shape', data_train.shape)
### Reshape into X and Y
Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))
Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))
Xtrain_shape = (data_train.shape[0])
### Testing segment----------
data_test = np.empty((len(testIndices),datanew.shape[1],
datanew.shape[2],datanew.shape[3],
datanew.shape[4]))
Ytest = np.empty((len(testIndices),classeslnew.shape[1],
classeslnew.shape[2]))
for index,ensemble in enumerate(testIndices):
data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
Ytest[index,:,:] = classeslnew[ensemble,:,:]
### Random ensembles are picked
if debug:
print('Training on ensembles: %s' % len(trainIndices))
print('Testing on ensembles: %s' % len(testIndices))
print('\norg data - shape', datanew.shape)
print('testing data - shape', data_test.shape)
### Reshape into X and Y
Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))
Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))
                Xtest_shape = (data_test.shape[0], data_test.shape[1])
data_train_shape = data_train.shape[0]
data_test_shape = data_test.shape[0]
### 'unlock' the random seed
np.random.seed(None)
### One-hot vectors
Ytrain = keras.utils.to_categorical(Ytrain)
Ytest = keras.utils.to_categorical(Ytest)
### Class weights
class_weight = class_weight_creator(Ytrain)
###############################################################################
###############################################################################
###############################################################################
###################################################################
### GCM type experiments without ensembles
elif ensTypeExperi == 'GCM':
if data.ndim == 5:
datanew = np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))
classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))
else:
datanew = data
classeslnew = classesl
if fac < 1 :
nrows = datanew.shape[1]
segment_train = int(np.floor(nrows * fac))
segment_test = nrows - segment_train
print('Training on',segment_train,'years, testing on',segment_test)
### Picking out random ensembles
firstyears = int(np.floor(segment_test/2))
lastyears = -int(np.floor(segment_test/2))
trainIndices = np.arange(firstyears,firstyears+segment_train,1)
testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)
### Training segment----------
data_train = np.empty((datanew.shape[0],len(trainIndices),
datanew.shape[2],datanew.shape[3]))
Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))
for index,ensemble in enumerate(trainIndices):
data_train[:,index,:,:] = datanew[:,ensemble,:,:]
Ytrain[:,index] = classeslnew[:,ensemble]
### Random ensembles are picked
if debug:
print('\nTraining on years: ',trainIndices)
print('Testing on years: ',testIndices)
print('\norg data - shape', datanew.shape)
print('training data - shape', data_train.shape)
### Reshape into X and Y
Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))
Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))
Xtrain_shape = (data_train.shape[0])
### Testing segment----------
data_test = np.empty((datanew.shape[0],len(testIndices),
datanew.shape[2],datanew.shape[3]))
Ytest = np.empty((classeslnew.shape[0],len(testIndices)))
for index,ensemble in enumerate(testIndices):
data_test[:,index,:,:] = datanew[:,ensemble,:,:]
Ytest[:,index] = classeslnew[:,ensemble]
### Random ensembles are picked
if debug:
print('Training on years: %s' % len(trainIndices))
print('Testing on years: %s' % len(testIndices))
print('\norg data - shape', datanew.shape)
print('testing data - shape', data_test.shape)
### Reshape into X and Y
Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))
Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))
                Xtest_shape = (data_test.shape[0], data_test.shape[1])
data_train_shape = data_train.shape[0]
data_test_shape = data_test.shape[0]
### 'unlock' the random seed
np.random.seed(None)
### One-hot vectors
Ytrain = keras.utils.to_categorical(Ytrain)
Ytest = keras.utils.to_categorical(Ytest)
### Class weights
class_weight = class_weight_creator(Ytrain)
else:
print(ValueError('WRONG EXPERIMENT!'))
return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight
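        ### Shape walk-through (illustrative, assuming the ENS experiment with
        ### 7 models x 16 members x 70 years and fac = 0.75): 12 members train
        ### and 4 test, so Xtrain comes out as (12*7*70, nlat*nlon) and Ytrain
        ### becomes a one-hot array of shape (12*7*70, num_of_class).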
###############################################################################
###############################################################################
###############################################################################
### Plotting functions
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
###############################################################################
###############################################################################
###############################################################################
### Create a class weight dictionary to help if the classes are unbalanced
def class_weight_creator(Y):
class_dict = {}
weights = np.max(np.sum(Y, axis=0)) / np.sum(Y, axis=0)
for i in range( Y.shape[-1] ):
class_dict[i] = weights[i]
return class_dict
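        ### Worked example (illustrative): for one-hot labels with class counts
        ### [100, 25, 50], np.sum(Y, axis=0) is [100, 25, 50] and the max is
        ### 100, so the returned dict is {0: 1.0, 1: 4.0, 2: 2.0} -- minority
        ### classes get proportionally larger weights when the dict is passed
        ### to Keras via fit(..., class_weight=...).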
###############################################################################
###############################################################################
###############################################################################
### Neural Network Creation & Training
class TimeHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, epoch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={}):
self.times.append(time.time() - self.epoch_time_start)
def defineNN(hidden, input_shape, output_shape, ridgePenalty):
model = Sequential()
### Initialize first layer
### Model is a single node with activation function
model.add(Dense(hidden[0],input_shape=(input_shape,),
activation=actFun, use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Initialize other layers
for layer in hidden[1:]:
model.add(Dense(layer,activation=actFun,
use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
print('\nTHIS IS AN ANN!\n')
#### Initialize output layer
model.add(Dense(output_shape,activation=None,use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Add softmax layer at the end
model.add(Activation('softmax'))
return model
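        ### Design note (sketch): the output layer is linear and softmax is
        ### added as a separate Activation layer; analyzers can then strip it
        ### with innvestigate.utils.model_wo_softmax(model) (a helper in
        ### innvestigate 1.x) before computing LRP relevances.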
def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):
global lr_here, batch_size
lr_here = 0.001
model.compile(optimizer=optimizers.SGD(lr=lr_here,
momentum=0.9,nesterov=True),
loss = 'categorical_crossentropy',
metrics=[metrics.categorical_accuracy])
# model.compile(optimizer=optimizers.Nadam(lr=lr_here),
# loss = 'categorical_crossentropy',
# metrics=[metrics.categorical_accuracy])
### Declare the relevant model parameters
batch_size = 24
print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----')
### Callbacks
time_callback = TimeHistory()
early_stopping = keras.callbacks.EarlyStopping(monitor='loss',
patience=2,
verbose=1,
mode='auto')
history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,
shuffle=True,verbose=verbose,
callbacks=[time_callback,early_stopping],
validation_split=0.)
print('******** done training ***********')
return model, history
def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):
"""or loops to iterate through training iterations, ridge penalty,
and hidden layer list
"""
results = {}
global nnet,random_network_seed
for niter in iterations:
for penalty in ridge_penalty:
for hidden in hiddens:
### Check / use random seed
if random_network_seed == None:
np.random.seed(None)
random_network_seed = int(np.random.randint(1, 100000))
np.random.seed(random_network_seed)
random.seed(random_network_seed)
tf.set_random_seed(0)
### Standardize the data
Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean,Xstd = stdVals
### Define the model
model = defineNN(hidden,
input_shape=np.shape(Xtrain)[1],
output_shape=np.shape(Ytrain)[1],
ridgePenalty=penalty)
### Train the net
model, history = trainNN(model,Xtrain,
Ytrain,niter,class_weight,verbose=1)
### After training, use the network with training data to
### check that we don't have any errors and output RMSE
rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))
if type(Ytest) != bool:
                            rmse_test = dSS.rmse(Ytest,model.predict(Xtest))
else:
rmse_test = False
this_result = {'iters': niter,
'hiddens' : hidden,
'RMSE Train' : rmse_train,
'RMSE Test' : rmse_test,
'ridge penalty': penalty,
'zero mean' : rm_annual_mean,
'zero merid mean' : rm_merid_mean,
'land only?' : land_only,
'ocean only?' : ocean_only,
'Segment Seed' : random_segment_seed,
'Network Seed' : random_network_seed }
results.update(this_result)
global experiment_result
experiment_result = experiment_result.append(results,
ignore_index=True)
                        # If plot_in_train is True, plot each iteration's training-loss curve.
if plot_in_train == True:
plt.figure()
plt.subplot(1,1,1)
plt.plot(history.history['loss'],label = 'training')
plt.title(history.history['loss'][-1])
plt.xlabel('epoch')
plt.xlim(2,len(history.history['loss'])-1)
plt.legend()
plt.grid(True)
plt.show()
#'unlock' the random seed
np.random.seed(None)
random.seed(None)
tf.set_random_seed(None)
return experiment_result, model
###############################################################################
###############################################################################
###############################################################################
### Results
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
### Parameters
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[10,10]]
ridge_penalty = [0.1]
# hiddensList = [[8,8]]
# ridge_penalty = [0.2]
actFun = 'relu'
if any([maskNoiseClass=='land',maskNoiseClass=='ocean']):
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[8,8]]
ridge_penalty = [0.10]
actFun = 'relu'
expList = [(0)] # (0,1)
expN = np.size(expList)
iterations = [100]
random_segment = True
foldsN = 1
for avgHalfChunk in (0,): # ([1,5,10]):#([1,2,5,10]):
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
for loop in ([0]): # (0,1,2,3,4,5):
### Get info about the region
lat_bounds,lon_bounds = UT.regions(reg_name)
data_all,lats,lons = read_primary_dataset(variq,dataset,
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,timeper,
lat_bounds,
lon_bounds)
data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,
dataset_obs,
numOfEns,
lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,timeper,
lat_bounds,
lon_bounds)
###############################################################################
###############################################################################
###############################################################################
for exp in expList:
### Get the data together
data, data_obs, = data_all, data_obs_all,
###############################################################################
if len(pickSMILE) >= 1:
data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)
                    print('\n*Pick models to analyze from %s*\n' % pickSMILE)
###############################################################################
if calculate_anomalies == True:
data, data_obs = dSS.calculate_anomalies(data,data_obs,
lats,lons,baseline,yearsall)
print('\n*Calculate anomalies for %s-%s*\n' % (baseline.min(),baseline.max()))
###############################################################################
if rm_annual_mean == True:
data, data_obs = dSS.remove_annual_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed annual mean*\n')
###############################################################################
if rm_merid_mean == True:
data, data_obs = dSS.remove_merid_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed meridional mean*\n')
###############################################################################
if rm_ensemble_mean == True:
data = dSS.remove_ensemble_mean(data,ravel_modelens,
ravelmodeltime,
rm_standard_dev,
numOfEns)
print('\n*Removed ensemble mean*')
###############################################################################
if rm_standard_dev == True:
data = dSS.rm_standard_dev(data,window,ravelmodeltime,
numOfEns)
print('\n*Removed standard deviation*')
###############################################################################
if rm_observational_mean == True:
data = dSS.remove_observations_mean(data,data_obs,lats,lons)
print('\n*Removed observational data*')
###############################################################################
if land_only == True:
data, data_obs = dSS.remove_ocean(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed ocean data*')
###############################################################################
if ocean_only == True:
data, data_obs = dSS.remove_land(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed land data*')
###############################################################################
### Adding random data
if sizeOfTwin > 0:
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
data = dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)
###############################################################################
###############################################################################
###############################################################################
### Loop over folds
for loop in np.arange(0,foldsN):
K.clear_session()
#---------------------------
# random_segment_seed = 34515
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
#---------------------------
Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)
YtrainClassMulti = Ytrain
YtestClassMulti = Ytest
# For use later
XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean, Xstd = stdVals
#---------------------------
random_network_seed = 87750
#---------------------------
# Create and train network
exp_result,model = test_train_loopClass(Xtrain,
YtrainClassMulti,
Xtest,
YtestClassMulti,
iterations=iterations,
ridge_penalty=ridge_penalty,
hiddens=hiddensList,class_weight=class_weight,
plot_in_train = True)
model.summary()
################################################################################################################################################
# save the model
dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'
savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)
if(reg_name=='Globe'):
regSave = ''
else:
regSave = '_' + reg_name
if(rm_annual_mean==True):
savename = savename + '_AnnualMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'
savename = savename + regSave
# model.save(dirname + savename + '.h5')
# np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)
print('saving ' + savename)
###############################################################
### Make final plot
### Get obs
dataOBSERVATIONS = data_obs
latsOBSERVATIONS = lats_obs
lonsOBSERVATIONS = lons_obs
Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])
annType = 'class'
if monthlychoice == 'DJF':
startYear = yearsall[sis].min()+1
endYear = yearsall[sis].max()
else:
startYear = yearsall[sis].min()
endYear = yearsall[sis].max()
years = np.arange(startYear,endYear+1,1)
Xmeanobs = np.nanmean(Xobs,axis=0)
Xstdobs = np.nanstd(Xobs,axis=0)
XobsS = (Xobs-Xmeanobs)/Xstdobs
XobsS[np.isnan(XobsS)] = 0
xtrainpred = (Xtrain-Xmean)/Xstd
xtrainpred[np.isnan(xtrainpred)] = 0
xtestpred = (Xtest-Xmean)/Xstd
xtestpred[np.isnan(xtestpred)] = 0
if(annType=='class'):
YpredObs = model.predict(XobsS)
YpredTrain = model.predict(xtrainpred)
YpredTest = model.predict(xtestpred)
#######################################################
#######################################################
#######################################################
### Check null hypothesis of random data!
randtest = 'RANDOM'
timepertest = 'historical'
randarray,latsra,lonsra = read_primary_dataset(variq,randtest,
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,timepertest,
lat_bounds,
lon_bounds)
randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])
randarraymean = np.nanmean(randarrayn,axis=0)
randarraystd = np.nanstd(randarrayn,axis=0)
randarrayS = (randarrayn-randarraymean)/randarraystd
### Prediction on random data
YpredRand = model.predict(randarrayS)
#######################################################
#######################################################
#######################################################
### Get output from model
trainingout = YpredTrain
testingout = YpredTest
if ensTypeExperi == 'ENS':
classesltrain = classeslnew[trainIndices,:,:].ravel()
classesltest = classeslnew[testIndices,:,:].ravel()
elif ensTypeExperi == 'GCM':
classesltrain = classeslnew[:,:,trainIndices].ravel()
classesltest = classeslnew[:,:,testIndices].ravel()
### Random data tests
randout = YpredRand
labelsrand = np.argmax(randout,axis=1)
uniquerand,countrand = np.unique(labelsrand,return_counts=True)
np.savetxt(directoryoutput + '%sLabels_' % randtest + saveData + '.txt',labelsrand)
np.savetxt(directoryoutput + '%sConfid_' % randtest + saveData + '.txt',randout)
### Observations
obsout = YpredObs
labelsobs = np.argmax(obsout,axis=1)
uniqueobs,countobs = np.unique(labelsobs,return_counts=True)
np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)
np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)
def truelabel(data):
"""
Calculate argmax
"""
maxindexdata= np.argmax(data[:,:],axis=1)
return maxindexdata
def accuracyTotalTime(data_pred,data_true):
"""
Compute accuracy for the entire time series
"""
data_truer = data_true
data_predr = data_pred
accdata_pred = accuracy_score(data_truer,data_predr)
return accdata_pred
##############################################################################
##############################################################################
##############################################################################
indextrain = truelabel(trainingout)
acctrain = accuracyTotalTime(indextrain,classesltrain)
indextest = truelabel(testingout)
acctest = accuracyTotalTime(indextest,classesltest)
print('\n\nAccuracy Training == ',acctrain)
print('Accuracy Testing == ',acctest)
## Save the output for plotting
np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)
np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)
np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)
np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)
np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)
np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)
### See more more details
model.layers[0].get_config()
## Define variable for analysis
print('\n\n------------------------')
print(variq,'= Variable!')
print(monthlychoice,'= Time!')
print(reg_name,'= Region!')
print(lat_bounds,lon_bounds)
print(dataset,'= Model!')
print(dataset_obs,'= Observations!\n')
print(rm_annual_mean,'= rm_annual_mean')
print(rm_merid_mean,'= rm_merid_mean')
print(rm_ensemble_mean,'= rm_ensemble_mean')
print(land_only,'= land_only')
print(ocean_only,'= ocean_only')
## Variables for plotting
lons2,lats2 = np.meshgrid(lons,lats)
observations = data_obs
modeldata = data
modeldatamean = np.nanmean(modeldata,axis=1)
spatialmean_obs = UT.calc_weightedAve(observations,lats2)
spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)
spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)
plt.figure()
plt.plot(spatialmean_modmean.transpose())
##############################################################################
##############################################################################
##############################################################################
## Visualizing through LRP
numLats = lats.shape[0]
numLons = lons.shape[0]
numDim = 3
##############################################################################
##############################################################################
##############################################################################
lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),
np.append(Ytrain,Ytest,axis=0),
biasBool,annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
meanlrp = np.nanmean(lrpall,axis=0)
fig=plt.figure()
plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)
### For training data only
lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
            ### For testing data only
lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For observations data only
lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
### For random data only
lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
##############################################################################
##############################################################################
##############################################################################
def netcdfLRP(lats,lons,var,directory,typemodel,saveData):
print('\n>>> Using netcdfLRP function!')
from netCDF4 import Dataset
import numpy as np
name = 'LRPMap' + typemodel + '_' + saveData + '.nc'
filename = directory + name
ncfile = Dataset(filename,'w',format='NETCDF4')
ncfile.description = 'LRP maps for using selected seed'
### Dimensions
ncfile.createDimension('years',var.shape[0])
ncfile.createDimension('lat',var.shape[1])
ncfile.createDimension('lon',var.shape[2])
### Variables
years = ncfile.createVariable('years','f4',('years'))
latitude = ncfile.createVariable('lat','f4',('lat'))
longitude = ncfile.createVariable('lon','f4',('lon'))
varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))
### Units
varns.units = 'unitless relevance'
ncfile.title = 'LRP relevance'
                ncfile.institution = 'Colorado State University'
ncfile.references = 'Barnes et al. [2020]'
### Data
years[:] = np.arange(var.shape[0])
latitude[:] = lats
longitude[:] = lons
varns[:] = var
ncfile.close()
print('*Completed: Created netCDF4 File!')
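            ### Quick read-back check (sketch; mirrors the 'AllData' file
            ### written by the first call below):
            # from netCDF4 import Dataset as NCRead
            # with NCRead(directoryoutput + 'LRPMapAllData_' + saveData + '.nc') as nc:
            #     lrp_check = nc.variables['LRP'][:]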
netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)
netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)
netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)
netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)
# ### TSNE test code
# from keras import models
# from sklearn.manifold import TSNE
# layer_outputs = [layer.output for layer in model.layers[:]]
# activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
# activations = activation_model.predict(XtrainS)
# plot_lines = []
# for l, layer in enumerate(activations):
# layer_activation = activations[l]
# layer_activation_embedded = TSNE(n_components=2).fit_transform(layer_activation)
# cs = plt.scatter(layer_activation_embedded[:,0],layer_activation_embedded[:,1], cmap='plasma', s = 3)
# plt.colorbar(cs)
# plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
# plt.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
# plt.show() | [
"[email protected]"
] | |
ad14adf6d06a381b56dbed4933f77e2d167fe623 | b5e8cb4102965199b0c35c995591a0eca1b589f2 | /프로그레머스/Level1/두개 뽑아서더하기.py | a8659d10b25ac3e6895e1be22edec8593c79cf61 | [] | no_license | smilejakdu/leetcode_with_silva_mento | 36af4d8242e700f8f47567c6fdc8eb116c44c0b1 | 09a2fe53befe3fa3b23eb7f14059b8d897fd53b5 | refs/heads/master | 2022-12-20T01:11:27.691518 | 2020-10-04T15:16:31 | 2020-10-04T15:16:31 | 283,961,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | ''':arg
You are given an integer array numbers.
Complete the solution function so that it returns an array containing,
in ascending order, every number that can be made by picking two numbers
at different indices of numbers and adding them.
The length of numbers is between 2 and 100 inclusive.
Every number in numbers is between 0 and 100 inclusive.
numbers         result
[2,1,3,4,1]     [2,3,4,5,6,7]
[5,0,2,7]       [2,5,7,9,12]
'''
# numbers = [2, 1, 3, 4, 1]
numbers = [5, 0, 2, 7]
def solution(numbers):
answer = []
for i in range(0, len(numbers)):
for r in range(i + 1, len(numbers)):
if not numbers[i] + numbers[r] in answer:
answer.append(numbers[i] + numbers[r])
answer.sort()
return answer
''':arg
Another person's solution
'''
def solution(numbers): return sorted(
{numbers[i] + numbers[j] for i in range(len(numbers)) for j in range(len(numbers)) if i > j})
print(solution(numbers))
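''':arg
An equivalent standard-library sketch: itertools.combinations yields each
index pair exactly once, so distinct indices come for free.
'''
from itertools import combinations


def solution_combinations(numbers):
    # Set comprehension dedupes repeated sums; sorted() gives ascending order.
    return sorted({a + b for a, b in combinations(numbers, 2)})


print(solution_combinations(numbers))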
| [
"[email protected]"
] | |
8eda5ec5410f3fbd05334a9373b1bcbc7f2371f3 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/attack/payloads/payloads/hostname.py | 102ad20b04e92b675013d7442960f156788aea8e | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | import re
from w3af.plugins.attack.payloads.base_payload import Payload
from w3af.core.ui.console.tables import table
class hostname(Payload):
"""
This payload shows the server hostname
"""
def api_read(self):
result = {}
result['hostname'] = []
values = []
values.append(self.shell.read('/etc/hostname')[:-1])
values.append(self.shell.read('/proc/sys/kernel/hostname')[:-1])
values = list(set(values))
values = [p for p in values if p != '']
result['hostname'] = values
return result
def api_win_read(self):
result = {}
result['hostname'] = []
def parse_iis6_log(iis6_log):
root1 = re.findall('(?<=OC_COMPLETE_INSTALLATION:m_csMachineName=)(.*?) ', iis6_log, re.MULTILINE)
root2 = re.findall('(?<=OC_QUEUE_FILE_OPS:m_csMachineName=)(.*?) ',
iis6_log, re.MULTILINE)
root3 = re.findall('(?<=OC_COMPLETE_INSTALLATION:m_csMachineName=)(.*?) ', iis6_log, re.MULTILINE)
root = root1 + root2 + root3
if root:
return root
else:
return []
def parse_certocm_log(certocm_log):
            # Raw string so both backslashes reach the regex engine intact;
            # without it the lookbehind is left unterminated and re raises.
            hostname = re.search(
                r'(?<=Set Directory Security:\\)(.*?)\\', certocm_log)
if hostname:
return '\\' + hostname.group(0)
else:
return ''
hostnames = parse_iis6_log(self.shell.read('/windows/iis6.log'))
hostnames += parse_certocm_log(self.shell.read('/windows/certocm.log'))
hostnames = list(set(hostnames))
hostnames = [p for p in hostnames if p != '']
result['hostname'] = hostnames
return result
def run_read(self):
api_result = self.api_read()
if not api_result['hostname']:
return 'Host name could not be identified.'
else:
rows = []
rows.append(['Hostname', ])
rows.append([])
for hostname in api_result['hostname']:
rows.append([hostname, ])
result_table = table(rows)
result_table.draw(80)
return rows
| [
"[email protected]"
] | |
9ac0f02a82eef6628f4229af2b726135e8012d50 | 60b48df762a515a734cfbedd7ca101df43f04824 | /python/ray/data/impl/pipeline_executor.py | 8fdaed897bb30c088d238f2afa41bdc8ad830696 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | LuBingtan/ray | a02b13c4dceab2b0d54870fd3abae5c11bae916e | 298742d7241681ee1f307ec0dd3cd7e9713a3c7d | refs/heads/master | 2023-03-05T16:32:35.596725 | 2022-06-05T23:21:53 | 2022-06-05T23:21:53 | 223,334,544 | 0 | 1 | Apache-2.0 | 2023-03-04T08:56:53 | 2019-11-22T06:01:51 | Python | UTF-8 | Python | false | false | 5,793 | py | from typing import Any, Callable, List, Optional, TYPE_CHECKING
import time
import concurrent.futures
import logging
import ray
from ray.data.context import DatasetContext
from ray.data.dataset import Dataset, T
from ray.data.impl.progress_bar import ProgressBar
from ray.data.impl import progress_bar
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from ray.data.dataset_pipeline import DatasetPipeline
def pipeline_stage(fn: Callable[[], Dataset[T]]) -> Dataset[T]:
# Force eager evaluation of all blocks in the pipeline stage. This
# prevents resource deadlocks due to overlapping stage execution (e.g.,
# task -> actor stage).
return fn().fully_executed()
class PipelineExecutor:
def __init__(self, pipeline: "DatasetPipeline[T]"):
self._pipeline: "DatasetPipeline[T]" = pipeline
self._stages: List[concurrent.futures.Future[Dataset[Any]]] = [None] * (
len(self._pipeline._optimized_stages) + 1
)
self._iter = iter(self._pipeline._base_iterable)
self._pool = concurrent.futures.ThreadPoolExecutor(
max_workers=len(self._stages)
)
self._stages[0] = self._pool.submit(
lambda n: pipeline_stage(n), next(self._iter)
)
if self._pipeline._length and self._pipeline._length != float("inf"):
length = self._pipeline._length
else:
length = 1
if self._pipeline._progress_bars:
self._bars = [
ProgressBar("Stage {}".format(i), length, position=i)
for i in range(len(self._stages))
]
else:
self._bars = None
def __del__(self):
for f in self._stages:
if f is not None:
f.cancel()
self._pool.shutdown(wait=False)
# Signal to all remaining threads to shut down.
with progress_bar._canceled_threads_lock:
for t in self._pool._threads:
if t.is_alive():
progress_bar._canceled_threads.add(t)
# Wait for 1s for all threads to shut down.
start = time.time()
while time.time() - start < 1:
self._pool.shutdown(wait=False)
if not [t for t in self._pool._threads if t.is_alive()]:
break
if [t for t in self._pool._threads if t.is_alive()]:
logger.info(
"Failed to shutdown all DatasetPipeline execution threads. "
"These threads will be destroyed once all current stages "
"complete or when the driver exits"
)
def __iter__(self):
return self
def __next__(self):
output = None
start = time.perf_counter()
while output is None:
if all(s is None for s in self._stages):
raise StopIteration
# Wait for any completed stages.
pending = [f for f in self._stages if f is not None]
ready, _ = concurrent.futures.wait(pending, timeout=0.1)
# Bubble elements down the pipeline as they become ready.
for i in range(len(self._stages))[::-1]:
is_last = i + 1 >= len(self._stages)
next_slot_free = is_last or self._stages[i + 1] is None
if not next_slot_free:
continue
slot_ready = self._stages[i] in ready
if not slot_ready:
continue
# Bubble.
result = self._stages[i].result()
if self._bars:
self._bars[i].update(1)
self._stages[i] = None
if is_last:
output = result
else:
self._stages[i + 1] = self._pool.submit(
lambda r, fn: pipeline_stage(lambda: fn(r)),
result,
self._pipeline._optimized_stages[i],
)
# Pull a new element for the initial slot if possible.
if self._stages[0] is None:
try:
self._stages[0] = self._pool.submit(
lambda n: pipeline_stage(n), next(self._iter)
)
except StopIteration:
pass
self._pipeline._stats.wait_time_s.append(time.perf_counter() - start)
self._pipeline._stats.add(output._plan.stats())
return output
@ray.remote(num_cpus=0)
class PipelineSplitExecutorCoordinator:
def __init__(
self,
pipeline: "DatasetPipeline[T]",
n: int,
splitter: Callable[[Dataset], "DatasetPipeline[T]"],
context: DatasetContext,
):
DatasetContext._set_current(context)
pipeline._optimize_stages()
self.executor = PipelineExecutor(pipeline)
self.n = n
self.splitter = splitter
self.cur_splits = [None] * self.n
def next_dataset_if_ready(self, split_index: int) -> Optional[Dataset[T]]:
# TODO(swang): This will hang if one of the consumers fails and is
# re-executed from the beginning. To make this fault-tolerant, we need
# to make next_dataset_if_ready idempotent.
# Pull the next dataset once all splits are fully consumed.
if all(s is None for s in self.cur_splits):
ds = next(self.executor)
self.cur_splits = self.splitter(ds)
assert len(self.cur_splits) == self.n, (self.cur_splits, self.n)
# Return the dataset at the split index once per split.
ret = self.cur_splits[split_index]
self.cur_splits[split_index] = None
return ret
def get_stats(self):
return self.executor._pipeline._stats
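
# Consumption sketch (illustrative; not part of Ray's public API): each of the
# n split consumers would poll the coordinator for its own index until a
# dataset window is ready, e.g.
#
#   while True:
#       ds = ray.get(coordinator.next_dataset_if_ready.remote(split_index))
#       if ds is None:
#           continue  # other consumers haven't finished the current window
#       ...iterate over ds...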
| [
"[email protected]"
] | |
2efa37dc9c262a28dbac42705bb7457067a8f29a | 40d978aa02335dd0cbab732dc4c8129aaf8590df | /term_sheet_generator_1173/settings.py | 8ae02a9064ae45703b98df27ec4632a848e41df7 | [] | no_license | crowdbotics-apps/term-sheet-generator-1173 | ab35fa1798295c7d8a1ada395cd875bb9a351b99 | 86c2995f7dfa1bfbe3f1a90e04f1ea23bbca105c | refs/heads/master | 2022-12-09T11:05:41.405844 | 2019-03-03T23:41:55 | 2019-03-03T23:41:55 | 173,643,543 | 0 | 0 | null | 2022-12-08T01:42:56 | 2019-03-03T23:40:17 | Python | UTF-8 | Python | false | false | 4,612 | py | """
Django settings for term_sheet_generator_1173 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'eb$b28$q)bqagzdoqff=n8%l-#o+@&x+myqu*%jy%9q7@82j%*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'term_sheet_generator_1173.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'term_sheet_generator_1173.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
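# Example value: DATABASE_URL=postgres://user:password@localhost:5432/dbname
# (django-environ's env.db() parses the URL into the DATABASES dict above).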
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| [
"[email protected]"
] | |
099c4e2af0ac551a0ae579ae294358ef5b0e82a3 | 2dc8c387de8bf7a6bd7506bd128b9dfa2bc8a3e2 | /0x05-python-exceptions/6-main.py~ | df03a1652e5a941660b8acdf869fdf3a7c3c3f5a | [] | no_license | yasmineholb/holbertonschool-higher_level_programming | ab57f9aa718ad8ebeb927a49c007e0232b0125e6 | ee66c781e9421a0dbf03795707572d9d1183e42e | refs/heads/master | 2020-09-28T19:35:52.740417 | 2020-06-15T09:45:18 | 2020-06-15T09:45:18 | 226,847,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | #!/usr/bin/python3
raise_exception_msg = __import__('6-raise_exception_msg').raise_exception_msg
try:
raise_exception_msg("C is fun")
except NameError as ne:
    print(ne)
| [
"[email protected]"
] | ||
437ef66dc3ac0f7cf2e9efffac97408be8c83276 | 35109088e79989e8b0ca9d9ddadad1546eebc8e3 | /AB/linux2/day20/code/03_mylist.py | 1d6725b8fc9f0dfd48f26baa85172076a58f52d0 | [] | no_license | ABCmoxun/AA | d0f8e18186325bfd832b26f3b71027d1dc8255b2 | c2c4a5b6683555b5d6200730b789d6e655f64c7f | refs/heads/master | 2020-03-25T11:11:54.882204 | 2020-03-03T05:19:07 | 2020-03-03T05:19:07 | 143,722,606 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | # 02_mylist.py
# This example demonstrates overloading the augmented (in-place) arithmetic assignment operator.
class MyList:
def __init__(self, iterable):
self.data = [x for x in iterable]
def __repr__(self):
return 'MyList(%r)' % self.data
def __add__(self, rhs):
print("__add__方法被调用")
return MyList(self.data + rhs.data)
def __iadd__(self, rhs):
print("__iadd__方法被调用")
self.data.extend(rhs.data)
return self
L1 = MyList([1, 2, 3])
L2 = MyList(range(4, 7))
print("id(L1) =", id(L1))
L1 += L2  # invokes __iadd__ here; only without __iadd__ would this fall back to L1 = L1 + L2
print('L1 =', L1)
print("id(L1) =", id(L1))
| [
"[email protected]"
] | |
4e0edaf1cd36c0f931aac8d2103253e56aa72e17 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/binary_20200524142534.py | 267e8eb35d054308ed642c67fdebaceeb181260e | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | def solution(N):
    # Codility "binary gap": length of the longest run of zeros that is
    # bounded by ones on both sides in the binary representation of N.
    binary = format(N, "b")
    print("binary", binary)
    best = 0
    current = 0
    seen_one = False
    for ch in binary:
        if ch == '1':
            # A '1' closes the current zero run; it only counts as a gap
            # when an earlier '1' opened it.
            if seen_one:
                best = max(best, current)
            seen_one = True
            current = 0
        else:
            current += 1
    print("total", best)
    return best
solution(9) | [
"[email protected]"
] | |
78ffed87f713d3a8dc3b7f88d12dd15354d53019 | 11587d450eb274a4684a393bd1073ea7cf6e28bf | /codingtest/week13/sequential_sum.py | b370fa4b7d8f1ec52c12b19dc448e31e15d193df | [] | no_license | Greek-and-Roman-God/Athena | 3b45869806ea06812e3d26661b159294fb49593b | 73a3a24df9667402258bff038325fd31cca36cf1 | refs/heads/main | 2023-05-28T23:04:10.760717 | 2021-06-13T04:39:31 | 2021-06-13T04:39:31 | 308,242,740 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # 소수의 연속합
n=int(input())
temp=[True]*(n+1)
end=round(n**0.5)+1
for i in range(2,end):
if temp[i]:
for j in range(i+i, n+1, i):
temp[j]=False
prime=[i for i in range(2, n+1) if temp[i]]
cnt=0
for i in range(len(prime)):
seq_sum=0
idx=i
while seq_sum<=n and idx<=len(prime):
if seq_sum==n:
cnt+=1
break
if idx==len(prime):
break
seq_sum+=prime[idx]
idx+=1
print(cnt) | [
"[email protected]"
] | |
721e9268d352f8c85508d0063c8e61cf288831b9 | feda93b7c67b60759bbff2ffd7f7bb2b71ed4bde | /convertToBase7.py | 07bdb887ce8e9d15534b6541dbd7db599ef03175 | [] | no_license | unsortedtosorted/codeChallenges | fcc8d92cc568922f2eb3b492c530e2e93d0e95ab | de8f9e7a7c45e325ac0de43a4e1f711a7c6a0a0c | refs/heads/master | 2020-04-04T14:38:43.538723 | 2019-03-14T03:24:49 | 2019-03-14T03:24:49 | 156,006,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | """
7 : 10
7/7 --> div=1, rem=0
14 : 20
14/7 --> div=2, rem=0
100: 202
100/7 --> div=14, rem=2
14/7 --> div=2 , rem=0
151:304
152/7 --> div=22, rem=4
22 --> div=3, rem=2
"""
class Solution(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num<7 and num > -7:
return str(num)
rem=8
s=""
isNeg=False
if num<0:
num=num*(-1)
isNeg=True
while num>=7:
div=num/7
rem=num-div*7
num=div
s=str(rem)+s
s=str(div)+s
if isNeg:
s="-"+s
return (s)
| [
"[email protected]"
] | |
8483392f85338b9ab4a9e0aa3e6925ca401d927a | 256fbfbc34952d60ebb1ce5cf92ba107f8d3e905 | /backend/agalot_app_23898/urls.py | 1479158011d81c28eeb290da27286e3ffb93a350 | [] | no_license | crowdbotics-apps/agalot-app-23898 | 8a337647047d79589bd2533cf13dda168a776f8a | 1fd14f5a1dc70a8c436e8a19c45164a3407d4a9d | refs/heads/master | 2023-02-17T13:43:56.206704 | 2021-01-17T17:12:07 | 2021-01-17T17:12:07 | 330,443,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,668 | py | """agalot_app_23898 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("task.api.v1.urls")),
path("task/", include("task.urls")),
path("api/v1/", include("task_profile.api.v1.urls")),
path("task_profile/", include("task_profile.urls")),
path("api/v1/", include("tasker_business.api.v1.urls")),
path("tasker_business/", include("tasker_business.urls")),
path("api/v1/", include("location.api.v1.urls")),
path("location/", include("location.urls")),
path("api/v1/", include("wallet.api.v1.urls")),
path("wallet/", include("wallet.urls")),
path("api/v1/", include("task_category.api.v1.urls")),
path("task_category/", include("task_category.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "agalot app"
admin.site.site_title = "agalot app Admin Portal"
admin.site.index_title = "agalot app Admin"
# swagger
api_info = openapi.Info(
title="agalot app API",
default_version="v1",
description="API documentation for agalot app App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
f5bf1d970d4fabb61a8831149bb32d29440852eb | a6ae216a1a5ed36562a7c25c63ff83dd7c146f70 | /vndk/tools/definition-tool/tests/test_elf_linker.py | dbdd95eef7d8f3979916964688112b3ba8cddb57 | [
"Apache-2.0"
] | permissive | cuizaixi/platform_development | 32dc08f7e403c82e3c74bfe0d9f0b7564d53047b | 68b3022d17d55c3b377a43e8c3023d6139e3ef5d | refs/heads/master | 2021-01-24T18:25:37.359722 | 2017-03-09T07:03:15 | 2017-03-09T07:03:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,219 | py | #!/usr/bin/env python3
from __future__ import print_function
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
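# Make vndk_definition_tool importable when this test file is run directly
# from the tests/ directory.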
import unittest
from compat import StringIO
from vndk_definition_tool import ELF, ELFLinker, PT_SYSTEM, PT_VENDOR
class GraphBuilder(object):
_PARTITION_NAMES = {
PT_SYSTEM: 'system',
PT_VENDOR: 'vendor',
}
_LIB_DIRS = {
ELF.ELFCLASS32: 'lib',
ELF.ELFCLASS64: 'lib64',
}
def __init__(self):
self.graph = ELFLinker()
def add_lib(self, partition, klass, name, dt_needed=[],
exported_symbols=set(), imported_symbols=set(),
extra_dir=None):
"""Create and add a shared library to ELFLinker."""
lib_dir = os.path.join('/', self._PARTITION_NAMES[partition],
self._LIB_DIRS[klass])
if extra_dir:
lib_dir = os.path.join(lib_dir, extra_dir)
path = os.path.join(lib_dir, name + '.so')
elf = ELF(klass, ELF.ELFDATA2LSB, dt_needed=dt_needed,
exported_symbols=exported_symbols,
imported_symbols=imported_symbols)
node = self.graph.add(partition, path, elf)
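        # Expose the node as an attribute such as `self.libc_32` /
        # `self.libc_64` so tests can reference libraries directly.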
setattr(self, name + '_' + elf.elf_class_name, node)
return node
def add_multilib(self, partition, name, dt_needed=[],
exported_symbols=set(), imported_symbols=set(),
extra_dir=None):
"""Add 32-bit / 64-bit shared libraries to ELFLinker."""
return (
self.add_lib(partition, ELF.ELFCLASS32, name, dt_needed,
exported_symbols, imported_symbols, extra_dir),
self.add_lib(partition, ELF.ELFCLASS64, name, dt_needed,
exported_symbols, imported_symbols, extra_dir)
)
def resolve(self):
self.graph.resolve_deps()
class ELFLinkerTest(unittest.TestCase):
def _create_normal_graph(self):
gb = GraphBuilder()
gb.add_multilib(PT_SYSTEM, 'libdl',
exported_symbols={'dlclose', 'dlopen', 'dlsym'})
gb.add_multilib(PT_SYSTEM, 'libm', exported_symbols={'cos', 'sin'})
gb.add_multilib(PT_SYSTEM, 'libc', dt_needed=['libdl.so', 'libm.so'],
exported_symbols={'fclose', 'fopen', 'fread'},
imported_symbols={'dlclose', 'dlopen', 'cos', 'sin'})
gb.add_multilib(PT_SYSTEM, 'libRS', dt_needed=['libdl.so'],
exported_symbols={'rsContextCreate'},
imported_symbols={'dlclose', 'dlopen', 'dlsym'})
gb.add_multilib(PT_SYSTEM, 'libcutils',
dt_needed=['libc.so', 'libdl.so'],
imported_symbols={'dlclose', 'dlopen', 'fclose',
'fopen'})
gb.add_multilib(PT_VENDOR, 'libEGL',
dt_needed=['libc.so', 'libcutils.so', 'libdl.so'],
exported_symbols={'eglGetDisplay'},
imported_symbols={'fclose', 'fopen'})
gb.resolve()
return gb
def _get_paths_from_nodes(self, nodes):
return sorted([node.path for node in nodes])
def test_map_path_to_lib(self):
gb = self._create_normal_graph()
graph = gb.graph
node = graph.map_path_to_lib('/system/lib/libc.so')
self.assertEqual(gb.libc_32, node)
self.assertEqual('/system/lib/libc.so', node.path)
node = graph.map_path_to_lib('/system/lib64/libdl.so')
self.assertEqual(gb.libdl_64, node)
self.assertEqual('/system/lib64/libdl.so', node.path)
node = graph.map_path_to_lib('/vendor/lib64/libEGL.so')
self.assertEqual(gb.libEGL_64, node)
self.assertEqual('/vendor/lib64/libEGL.so', node.path)
self.assertEqual(None, graph.map_path_to_lib('/no/such/path.so'))
def test_map_paths_to_libs(self):
gb = self._create_normal_graph()
graph = gb.graph
bad = []
paths = ['/system/lib/libc.so', '/system/lib/libdl.so']
nodes = graph.map_paths_to_libs(paths, bad.append)
self.assertEqual([], bad)
self.assertEqual(2, len(nodes))
self.assertEqual(paths, self._get_paths_from_nodes(nodes))
bad = []
paths = ['/no/such/path.so', '/system/lib64/libdl.so']
nodes = graph.map_paths_to_libs(paths, bad.append)
self.assertEqual(['/no/such/path.so'], bad)
self.assertEqual(['/system/lib64/libdl.so'],
self._get_paths_from_nodes(nodes))
def test_elf_class(self):
gb = self._create_normal_graph()
graph = gb.graph
self.assertEqual(6, len(graph.lib32))
self.assertEqual(6, len(graph.lib64))
def test_partitions(self):
gb = self._create_normal_graph()
graph = gb.graph
self.assertEqual(10, len(gb.graph.lib_pt[PT_SYSTEM]))
self.assertEqual(2, len(gb.graph.lib_pt[PT_VENDOR]))
def test_deps(self):
gb = self._create_normal_graph()
graph = gb.graph
# Check the dependencies of libc.so.
node = gb.graph.map_path_to_lib('/system/lib/libc.so')
self.assertEqual(['/system/lib/libdl.so', '/system/lib/libm.so'],
self._get_paths_from_nodes(node.deps))
# Check the dependencies of libRS.so.
node = gb.graph.map_path_to_lib('/system/lib64/libRS.so')
self.assertEqual(['/system/lib64/libdl.so'],
self._get_paths_from_nodes(node.deps))
# Check the dependencies of libEGL.so.
node = gb.graph.map_path_to_lib('/vendor/lib64/libEGL.so')
self.assertEqual(['/system/lib64/libc.so', '/system/lib64/libcutils.so',
'/system/lib64/libdl.so'],
self._get_paths_from_nodes(node.deps))
def test_linked_symbols(self):
gb = self._create_normal_graph()
graph = gb.graph
# Check the unresolved symbols.
for lib_set in (graph.lib32, graph.lib64):
for lib in lib_set.values():
self.assertEqual(set(), lib.unresolved_symbols)
# Check the linked symbols.
for lib in ('lib', 'lib64'):
libdl = graph.map_path_to_lib('/system/' + lib + '/libdl.so')
libm = graph.map_path_to_lib('/system/' + lib + '/libm.so')
libc = graph.map_path_to_lib('/system/' + lib + '/libc.so')
libRS = graph.map_path_to_lib('/system/' + lib + '/libRS.so')
libcutils = \
graph.map_path_to_lib('/system/' + lib + '/libcutils.so')
libEGL = graph.map_path_to_lib('/vendor/' + lib + '/libEGL.so')
# Check the linked symbols for libc.so.
self.assertIs(libdl, libc.linked_symbols['dlclose'])
self.assertIs(libdl, libc.linked_symbols['dlopen'])
self.assertIs(libm, libc.linked_symbols['cos'])
self.assertIs(libm, libc.linked_symbols['sin'])
# Check the linked symbols for libRS.so.
self.assertIs(libdl, libRS.linked_symbols['dlclose'])
self.assertIs(libdl, libRS.linked_symbols['dlopen'])
self.assertIs(libdl, libRS.linked_symbols['dlsym'])
# Check the linked symbols for libcutils.so.
self.assertIs(libdl, libcutils.linked_symbols['dlclose'])
self.assertIs(libdl, libcutils.linked_symbols['dlopen'])
self.assertIs(libc, libcutils.linked_symbols['fclose'])
self.assertIs(libc, libcutils.linked_symbols['fopen'])
# Check the linked symbols for libEGL.so.
self.assertIs(libc, libEGL.linked_symbols['fclose'])
self.assertIs(libc, libEGL.linked_symbols['fopen'])
def test_unresolved_symbols(self):
gb = GraphBuilder()
gb.add_lib(PT_SYSTEM, ELF.ELFCLASS64, 'libfoo', dt_needed=[],
exported_symbols={'foo', 'bar'},
imported_symbols={'__does_not_exist'})
gb.resolve()
lib = gb.graph.map_path_to_lib('/system/lib64/libfoo.so')
self.assertEqual({'__does_not_exist'}, lib.unresolved_symbols)
def test_users(self):
gb = self._create_normal_graph()
graph = gb.graph
# Check the users of libc.so.
node = graph.map_path_to_lib('/system/lib/libc.so')
self.assertEqual(['/system/lib/libcutils.so', '/vendor/lib/libEGL.so'],
self._get_paths_from_nodes(node.users))
# Check the users of libdl.so.
node = graph.map_path_to_lib('/system/lib/libdl.so')
self.assertEqual(['/system/lib/libRS.so', '/system/lib/libc.so',
'/system/lib/libcutils.so', '/vendor/lib/libEGL.so'],
self._get_paths_from_nodes(node.users))
# Check the users of libRS.so.
node = graph.map_path_to_lib('/system/lib64/libRS.so')
self.assertEqual([], self._get_paths_from_nodes(node.users))
# Check the users of libEGL.so.
node = graph.map_path_to_lib('/vendor/lib64/libEGL.so')
self.assertEqual([], self._get_paths_from_nodes(node.users))
def test_compute_vndk_stable(self):
gb = GraphBuilder()
# HIDL libraries.
gb.add_multilib(PT_SYSTEM, 'libhidlbase', extra_dir='vndk-stable')
gb.add_multilib(PT_SYSTEM, 'libhidltransport', extra_dir='vndk-stable')
gb.add_multilib(PT_SYSTEM, 'libhidlmemory', extra_dir='vndk-stable')
gb.add_multilib(PT_SYSTEM, 'libfmp', extra_dir='vndk-stable')
gb.add_multilib(PT_SYSTEM, 'libhwbinder', extra_dir='vndk-stable')
# UI libraries.
# TODO: Add libui.so here.
gb.resolve()
# Compute VNDK-stable.
vndk_stable = set(
lib.path for lib in gb.graph.compute_vndk_stable(False))
for lib in ('lib', 'lib64'):
# Check HIDL libraries.
self.assertIn('/system/' + lib + '/vndk-stable/libhidlbase.so',
vndk_stable)
self.assertIn('/system/' + lib + '/vndk-stable/libhidltransport.so',
vndk_stable)
self.assertIn('/system/' + lib + '/vndk-stable/libhidlmemory.so',
vndk_stable)
self.assertIn('/system/' + lib + '/vndk-stable/libfmp.so',
vndk_stable)
self.assertIn('/system/' + lib + '/vndk-stable/libhwbinder.so',
vndk_stable)
# TODO: Check libui.so here.
def test_compute_vndk_stable_closure(self):
gb = GraphBuilder()
libc = gb.add_lib(PT_SYSTEM, ELF.ELFCLASS64, 'libc')
libhidlbase = gb.add_lib(PT_SYSTEM, ELF.ELFCLASS64, 'libhidlbase',
dt_needed=['libfoo.so'],
extra_dir='vndk-stable')
libfoo = gb.add_lib(PT_SYSTEM, ELF.ELFCLASS64, 'libfoo')
gb.resolve()
# Compute VNDK-stable.
vndk_stable = gb.graph.compute_vndk_stable(False)
vndk_stable_closure = gb.graph.compute_vndk_stable(True)
self.assertSetEqual({libhidlbase}, vndk_stable)
self.assertSetEqual({libhidlbase, libfoo}, vndk_stable_closure)
self.assertNotIn(libc, vndk_stable)
self.assertNotIn(libc, vndk_stable_closure)
def test_compute_sp_hal(self):
gb = GraphBuilder()
# HIDL SP-HAL implementation.
gb.add_multilib(PT_SYSTEM, 'gralloc.default', extra_dir='hw')
gb.add_multilib(PT_SYSTEM, 'gralloc.chipset', extra_dir='hw')
gb.add_multilib(PT_SYSTEM, '[email protected]',
extra_dir='hw')
# NDK loader libraries should not be considered as SP-HALs.
gb.add_multilib(PT_SYSTEM, 'libvulkan')
gb.add_multilib(PT_SYSTEM, 'libEGL')
gb.add_multilib(PT_SYSTEM, 'libGLESv1_CM')
gb.add_multilib(PT_SYSTEM, 'libGLESv2')
gb.add_multilib(PT_SYSTEM, 'libGLESv3')
# OpenGL implementation.
gb.add_multilib(PT_VENDOR, 'libEGL_chipset', extra_dir='egl')
gb.add_multilib(PT_VENDOR, 'libGLESv1_CM_chipset', extra_dir='egl')
gb.add_multilib(PT_VENDOR, 'libGLESv2_chipset', extra_dir='egl')
gb.add_multilib(PT_VENDOR, 'libGLESv3_chipset', extra_dir='egl')
# Renderscript implementation.
gb.add_multilib(PT_VENDOR, 'libRSDriver_chipset')
gb.add_multilib(PT_VENDOR, 'libPVRRS')
# Vulkan implementation.
gb.add_multilib(PT_VENDOR, 'vulkan.chipset', extra_dir='hw')
# Some un-related libraries.
gb.add_multilib(PT_SYSTEM, 'libfoo')
gb.add_multilib(PT_VENDOR, 'libfoo')
gb.resolve()
# Compute SP-HAL.
sp_hals = set(lib.path for lib in gb.graph.compute_sp_hal(set(), False))
for lib in ('lib', 'lib64'):
# Check HIDL SP-HAL implementation.
self.assertIn('/system/' + lib + '/hw/gralloc.default.so', sp_hals)
self.assertIn('/system/' + lib + '/hw/gralloc.chipset.so', sp_hals)
self.assertIn('/system/' + lib + '/hw/'
'[email protected]',
sp_hals)
# Check that NDK loaders are not SP-HALs.
self.assertNotIn('/system/' + lib + '/libvulkan.so', sp_hals)
self.assertNotIn('/system/' + lib + '/libEGL.so', sp_hals)
self.assertNotIn('/system/' + lib + '/libGLESv1_CM.so', sp_hals)
self.assertNotIn('/system/' + lib + '/libGLESv2.so', sp_hals)
self.assertNotIn('/system/' + lib + '/libGLESv3.so', sp_hals)
# Check that OpenGL implementations are SP-HALs.
self.assertIn('/vendor/' + lib + '/egl/libEGL_chipset.so', sp_hals)
self.assertIn('/vendor/' + lib + '/egl/libGLESv1_CM_chipset.so',
sp_hals)
self.assertIn('/vendor/' + lib + '/egl/libGLESv2_chipset.so',
sp_hals)
self.assertIn('/vendor/' + lib + '/egl/libGLESv3_chipset.so',
sp_hals)
# Check that Renderscript implementations are SP-HALs.
self.assertIn('/vendor/' + lib + '/libRSDriver_chipset.so', sp_hals)
self.assertIn('/vendor/' + lib + '/libPVRRS.so', sp_hals)
# Check that vulkan implementation are SP-HALs.
self.assertIn('/vendor/' + lib + '/libPVRRS.so', sp_hals)
# Check that un-related libraries are not SP-HALs.
self.assertNotIn('/system/' + lib + '/libfoo.so', sp_hals)
self.assertNotIn('/vendor/' + lib + '/libfoo.so', sp_hals)
def test_compute_sp_hal_closure(self):
gb = GraphBuilder()
libc = gb.add_lib(PT_SYSTEM, ELF.ELFCLASS64, 'libc')
libhidlbase = gb.add_lib(PT_SYSTEM, ELF.ELFCLASS64, 'libhidlbase')
libhidltransport = gb.add_lib(PT_SYSTEM, ELF.ELFCLASS64,
'libhidltransport')
gralloc_mapper = gb.add_lib(
PT_VENDOR, ELF.ELFCLASS64,
name='[email protected]',
dt_needed=['libhidlbase.so', 'libhidltransport.so',
'libc.so', 'gralloc_vnd.so'],
extra_dir='sameprocess')
gralloc_vnd = gb.add_lib(PT_VENDOR, ELF.ELFCLASS64, 'gralloc_vnd')
gb.resolve()
vndk_stable = {libhidlbase, libhidltransport}
sp_hal = gb.graph.compute_sp_hal(vndk_stable, closure=False)
sp_hal_closure = gb.graph.compute_sp_hal(vndk_stable, closure=True)
self.assertSetEqual({gralloc_mapper}, sp_hal)
self.assertSetEqual({gralloc_mapper, gralloc_vnd}, sp_hal_closure)
self.assertNotIn(libhidlbase, sp_hal_closure)
self.assertNotIn(libhidltransport, sp_hal_closure)
self.assertNotIn(libc, sp_hal_closure)
def test_find_existing_vndk(self):
gb = GraphBuilder()
libpng32_core, libpng64_core = \
gb.add_multilib(PT_SYSTEM, 'libpng', extra_dir='vndk-26')
libpng32_fwk, libpng64_fwk = \
gb.add_multilib(PT_SYSTEM, 'libpng', extra_dir='vndk-26-ext')
libjpeg32_core, libjpeg64_core = \
gb.add_multilib(PT_SYSTEM, 'libjpeg', extra_dir='vndk-26')
libjpeg32_vnd, libjpeg64_vnd = \
gb.add_multilib(PT_VENDOR, 'libjpeg', extra_dir='vndk-26-ext')
gb.resolve()
vndk_core, vndk_fwk_ext, vndk_vnd_ext = gb.graph.find_existing_vndk()
expected_vndk_core = {
libpng32_core, libpng64_core, libjpeg32_core, libjpeg64_core}
expected_vndk_fwk_ext = {libpng32_fwk, libpng64_fwk}
expected_vndk_vnd_ext = {libjpeg32_vnd, libjpeg64_vnd}
self.assertSetEqual(expected_vndk_core, vndk_core)
self.assertSetEqual(expected_vndk_fwk_ext, vndk_fwk_ext)
self.assertSetEqual(expected_vndk_vnd_ext, vndk_vnd_ext)
def test_find_existing_vndk_without_version(self):
gb = GraphBuilder()
libpng32_core, libpng64_core = \
gb.add_multilib(PT_SYSTEM, 'libpng', extra_dir='vndk')
libpng32_fwk, libpng64_fwk = \
gb.add_multilib(PT_SYSTEM, 'libpng', extra_dir='vndk-ext')
libjpeg32_core, libjpeg64_core = \
gb.add_multilib(PT_SYSTEM, 'libjpeg', extra_dir='vndk')
libjpeg32_vnd, libjpeg64_vnd = \
gb.add_multilib(PT_VENDOR, 'libjpeg', extra_dir='vndk-ext')
gb.resolve()
vndk_core, vndk_fwk_ext, vndk_vnd_ext = gb.graph.find_existing_vndk()
expected_vndk_core = {
libpng32_core, libpng64_core, libjpeg32_core, libjpeg64_core}
expected_vndk_fwk_ext = {libpng32_fwk, libpng64_fwk}
expected_vndk_vnd_ext = {libjpeg32_vnd, libjpeg64_vnd}
self.assertSetEqual(expected_vndk_core, vndk_core)
self.assertSetEqual(expected_vndk_fwk_ext, vndk_fwk_ext)
self.assertSetEqual(expected_vndk_vnd_ext, vndk_vnd_ext)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
df1d048f17f5a566209ef32349cf25f4c9e2f754 | be5ea20226c37d81f1ccb2f704d8825d36e88765 | /09. Decorators/EXERCISE/07_execution_time.py | 46206ea6d73977ddd8aef00da8cdfe89e906e747 | [] | no_license | dimDamyanov/PythonOOP | 3845e450e5a48fef4f70a186664e07c0cd60e09b | 723204f5b7e953874fac9314e48eb1d1628d6ff5 | refs/heads/main | 2023-04-07T18:00:36.735248 | 2021-04-19T20:57:14 | 2021-04-19T20:57:14 | 341,329,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import time
def exec_time(func):
def wrapper(*args):
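        # Note: the wrapper returns the elapsed wall-clock time in seconds
        # and discards func's own return value.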
start = time.time()
func(*args)
end = time.time()
return end - start
return wrapper
@exec_time
def loop(start, end):
total = 0
for x in range(start, end):
total += x
return total
print(loop(1, 10000000))
@exec_time
def concatenate(strings):
result = ""
for string in strings:
result += string
return result
print(concatenate(["a" for i in range(1000000)])) | [
"[email protected]"
] | |
162cb9aa4751a361be68509ebfa3fbe28df00f56 | dd6b0635021185bf29f20b5b49ab03f93ff841e3 | /BH_Mergers.py | 24b202c99314f23f6630a06b5e31721bdede2d65 | [] | no_license | sbustamante/Spinstractor | 27754ee5506b929fb51d6af35852c4780873e960 | d546010735fb698963b48b19a2e017060d13ef10 | refs/heads/master | 2021-01-22T02:05:00.969180 | 2018-10-30T09:13:39 | 2018-10-30T09:13:39 | 92,332,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | #========================================================================================
# LIBRARIES
#========================================================================================
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import h5py
import time
plt.close('all')
#========================================================================================
# PARAMETERS
#========================================================================================
#Data folder
DataFolder = '/home/bustamsn/bustamsn/cosmological_BH/Sims256/'
#Simulation
Simulation = 'cosmobh03'
#Number of chunks (same number of used processors)
N_proc = 256
#========================================================================================
# Extracting data
#========================================================================================
indexes = np.loadtxt('%s%s/analysis/BH_IDs.txt'%(DataFolder,Simulation))[:,[0,1]]
#os.system('rm tmp.txt')
for i in xrange(N_proc):
print 'In file', i
#Loading data
str_cmd = "less %s%s/output/blackhole_mergers/blackhole_mergers_%d.txt >> tmp.txt"%(DataFolder,Simulation,i)
os.system(str_cmd)
#Storing concatenated file
data = np.loadtxt('tmp.txt')
os.system('rm tmp.txt')
f= open('%s%s/analysis/BH_Mergers_R.txt'%(DataFolder,Simulation), 'a')
np.savetxt(f, data[:,1:], fmt='%e %d %e %d %e')
f.close()
#========================================================================================
# Applying correction to merger file
#========================================================================================
data_merger = np.loadtxt('%s%s/analysis/BH_Mergers_R.txt'%(DataFolder,Simulation))
M1 = []
for i in xrange(len(data_merger)):
    t_merge = data_merger[i,0]  # merger time (avoid shadowing the time module)
id_mr = indexes[indexes[:,1]==data_merger[i,1].astype(int),0].astype(int)
if len(id_mr)==1:
try:
id_mr = id_mr[0]
#Loading data of current BH
data_BHi = np.loadtxt('%s%s/analysis/spins/BH_%d.txt'%(DataFolder,Simulation,id_mr))
            mask_t = data_BHi[:,0] > t_merge
M1.append( data_BHi[mask_t,2][0] - data_merger[i,4] )
        except Exception:
print i
M1.append( data_merger[i,2] )
else:
M1.append( data_merger[i,2] )
M1 = np.array(M1)
np.savetxt( '%s%s/analysis/BH_Mergers.txt'%(DataFolder,Simulation), np.array( [data_merger[:,0], data_merger[:,1], M1, data_merger[:,3], data_merger[:,4]] ).T, fmt='%e %d %e %d %e' )
| [
"[email protected]"
] | |
861261d5bcc093077a78e927032649deaa9cc080 | c89d50f7554f2eb551e5a493b7b9614c478a8ac2 | /R/prepare_data.py | 9f0a7a147a47fbfe77d3233e002876b1d03693e0 | [] | no_license | junnyhe/Code | c741d5f6b559ef0d4fbb37f8b67f5dd59d89b3d4 | b698e4831bc240dbc1229c21c71ceb88733aabd2 | refs/heads/master | 2020-04-11T09:44:22.451742 | 2015-08-30T02:28:51 | 2015-08-30T02:28:51 | 31,276,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,456 | py | import csv
import gzip
# change delimiter to "|"
def convert_data(infile_name,outfile_name):
# remove space in header, and strange characters in data
infile=gzip.open(infile_name,'rb')
incsv=csv.reader(infile)
outfile=open(outfile_name,'w')
outcsv=csv.writer(outfile,delimiter='|')
header=incsv.next()
header=[var.replace(" ","_").replace("-",".") for var in header]
outcsv.writerow(header)
for row in incsv:
outcsv.writerow([var.replace('"','').replace("'","").replace('\n','').replace('\r','').replace('#','apt') for var in row])
outfile.close()
infile_name='/Users/junhe/Documents/Data/Model_Data_Signal_Tmx/model_data_ds_ins_imp_woe.csv.gz'
outfile_name='/Users/junhe/Documents/Data/Model_Data_Signal_Tmx/model_data_ds_ins_imp_woe.csv'
convert_data(infile_name,outfile_name)
infile_name='/Users/junhe/Documents/Data/Model_Data_Signal_Tmx/model_data_ds_oos_imp_woe.csv.gz'
outfile_name='/Users/junhe/Documents/Data/Model_Data_Signal_Tmx/model_data_ds_oos_imp_woe.csv'
convert_data(infile_name,outfile_name)
def process_var_list(infile_name, outfile_name):
# remove space in variable names
infile=open(infile_name,'rU')
incsv=csv.reader(infile)
outfile=open(outfile_name,'w')
outcsv=csv.writer(outfile)
for row in incsv:
outcsv.writerow([var.replace(" ","_").replace("-",".") for var in row])
outfile.close()
infile_name='/Users/junhe/Documents/Results/Model_Results_Signal_Tmx/model_var_list_signal_tmxboth.csv'
outfile_name='/Users/junhe/Documents/Results/Model_Results_Signal_Tmx/model_var_list_signal_tmxboth_nospace.csv'
process_var_list(infile_name, outfile_name)
['payment_request_id', 'state', 'create_time', 'fs_payment_request_id', 'signal_1', 'signal_2', 'signal_4', 'signal_8', 'signal_9', 'signal_10', 'signal_11', 'signal_12', 'signal_13', 'signal_14', 'signal_15', 'signal_16', 'signal_17', 'signal_18', 'signal_19', 'signal_24', 'signal_25', 'signal_26', 'signal_27', 'signal_28', 'signal_29', 'signal_31', 'signal_33', 'signal_34', 'signal_35', 'signal_36', 'signal_37', 'signal_38', 'signal_39', 'signal_40', 'signal_41', 'signal_42', 'signal_43', 'signal_44', 'signal_45', 'signal_46', 'signal_47', 'signal_48', 'signal_49', 'signal_50', 'signal_58', 'signal_59', 'signal_61', 'signal_62', 'signal_63', 'signal_64', 'signal_65', 'signal_66', 'signal_67', 'signal_68', 'signal_69', 'signal_70', 'signal_71', 'signal_72', 'signal_73', 'signal_74', 'signal_75', 'signal_76', 'signal_77', 'signal_78', 'signal_79', 'signal_127', 'signal_128', 'signal_129', 'signal_140', 'signal_141', 'signal_142', 'signal_143', 'signal_144', 'signal_145', 'signal_146', 'signal_147', 'signal_148', 'signal_149', 'signal_150', 'signal_151', 'signal_152', 'signal_153', 'signal_154', 'signal_155', 'signal_156', 'signal_157', 'signal_158', 'signal_159', 'signal_160', 'signal_161', 'signal_162', 'signal_163', 'signal_164', 'signal_165', 'signal_166', 'signal_167', 'signal_168', 'signal_169', 'signal_170', 'signal_173', 'signal_174', 'signal_175', 'signal_176', 'signal_177', 'signal_178', 'signal_179', 'signal_180', 'signal_181', 'signal_182', 'signal_204', 'signal_228', 'signal_247', 'signal_248', 'signal_300', 'signal_301', 'signal_302', 'signal_303', 'signal_304', 'signal_305', 'signal_306', 'signal_307', 'signal_312', 'signal_313', 'signal_351', 'signal_352', 'signal_353', 'signal_354', 'signal_355', 'signal_356', 'signal_361', 'signal_362', 'signal_371', 'signal_400', 'signal_401', 'signal_402', 'signal_403', 'signal_404', 'signal_405', 'signal_406', 'signal_407', 'signal_408', 'signal_409', 'signal_410', 'signal_411', 'signal_412', 'signal_413', 'signal_414', 'signal_415', 'signal_416', 'signal_417', 'signal_418', 'signal_419', 'signal_420', 'signal_421', 'signal_422', 'signal_423', 'signal_424', 'signal_425', 'signal_426', 'signal_427', 'signal_428', 'signal_429', 'signal_500', 'signal_501', 'signal_503', 'signal_504', 'signal_505', 'signal_506', 'signal_507', 'signal_508', 'signal_509', 'signal_510', 'signal_511', 'signal_512', 'signal_513', 'signal_514', 'signal_515', 'signal_516', 'signal_517', 'signal_518', 'signal_519', 'signal_520', 'signal_521', 'signal_522', 'signal_523', 'signal_524', 'signal_525', 'signal_526', 'signal_527', 'signal_528', 'signal_529', 'signal_530', 'signal_531', 'signal_532', 'signal_533', 'signal_534', 'signal_535', 'signal_536', 'signal_537', 'signal_538', 'signal_539', 'signal_540', 'signal_541', 'signal_542', 'signal_543', 'signal_544', 'signal_545', 'signal_546', 'signal_547', 'signal_548', 'signal_560', 'signal_561', 'signal_570', 'signal_571', 'signal_580', 'signal_590', 'signal_591', 'signal_592', 'signal_593', 'signal_600', 'signal_601', 'signal_602', 'signal_603', 'signal_604', 'signal_605', 'signal_606', 'signal_607', 'signal_608', 'signal_611', 'signal_612', 'signal_613', 'signal_614', 'signal_615', 'signal_616', 'signal_617', 'signal_618', 'signal_100018', 'signal_100024', 'signal_100030', 'signal_100039', 'signal_100042', 'signal_100048', 'signal_100057', 'signal_100066', 'signal_100072', 'signal_100073', 'signal_100083', 'signal_100086', 'signal_100087', 'signal_100096', 'signal_100099', 'signal_100102', 'signal_100108', 
'signal_100110', 'tmx_payer_account_address_assert_history', 'tmx_payer_account_address_city', 'tmx_payer_account_address_country', 'tmx_payer_account_address_first_seen', 'tmx_payer_account_address_last_event', 'tmx_payer_account_address_last_update', 'tmx_payer_account_address_result', 'tmx_payer_account_address_score', 'tmx_payer_account_address_state', 'tmx_payer_account_address_street1', 'tmx_payer_account_address_worst_score', 'tmx_payer_account_address_zip', 'tmx_payer_account_email', 'tmx_payer_account_email_activities', 'tmx_payer_account_email_assert_history', 'tmx_payer_account_email_attributes', 'tmx_payer_account_email_first_seen', 'tmx_payer_account_email_last_assertion', 'tmx_payer_account_email_last_event', 'tmx_payer_account_email_last_update', 'tmx_payer_account_email_result', 'tmx_payer_account_email_score', 'tmx_payer_account_email_worst_score', 'tmx_payer_account_login', 'tmx_payer_account_login_assert_history', 'tmx_payer_account_login_first_seen', 'tmx_payer_account_login_last_event', 'tmx_payer_account_login_last_update', 'tmx_payer_account_login_result', 'tmx_payer_account_login_score', 'tmx_payer_account_login_worst_score', 'tmx_payer_account_name', 'tmx_payer_account_name_activities', 'tmx_payer_account_name_assert_history', 'tmx_payer_account_name_attributes', 'tmx_payer_account_name_first_seen', 'tmx_payer_account_name_last_assertion', 'tmx_payer_account_name_last_event', 'tmx_payer_account_name_last_update', 'tmx_payer_account_name_result', 'tmx_payer_account_name_score', 'tmx_payer_account_name_worst_score', 'tmx_payer_agent_type', 'tmx_payer_Array', 'tmx_payer_browser_language', 'tmx_payer_browser_language_anomaly', 'tmx_payer_browser_string', 'tmx_payer_browser_string_anomaly', 'tmx_payer_browser_string_hash', 'tmx_payer_browser_string_mismatch', 'tmx_payer_cidr_number', 'tmx_payer_css_image_loaded', 'tmx_payer_custom_count_1', 'tmx_payer_custom_count_2', 'tmx_payer_custom_match_1', 'tmx_payer_custom_match_2', 'tmx_payer_custom_match_3', 'tmx_payer_custom_policy_score', 'tmx_payer_detected_fl', 'tmx_payer_device_activities', 'tmx_payer_device_assert_history', 'tmx_payer_device_attributes', 'tmx_payer_device_first_seen', 'tmx_payer_device_id', 'tmx_payer_device_id_confidence', 'tmx_payer_device_last_assertion', 'tmx_payer_device_last_event', 'tmx_payer_device_last_update', 'tmx_payer_device_match_result', 'tmx_payer_device_result', 'tmx_payer_device_score', 'tmx_payer_device_worst_score', 'tmx_payer_dns_ip', 'tmx_payer_dns_ip_city', 'tmx_payer_dns_ip_geo', 'tmx_payer_dns_ip_isp', 'tmx_payer_dns_ip_latitude', 'tmx_payer_dns_ip_longitude', 'tmx_payer_dns_ip_organization', 'tmx_payer_dns_ip_region', 'tmx_payer_enabled_ck', 'tmx_payer_enabled_fl', 'tmx_payer_enabled_im', 'tmx_payer_enabled_js', 'tmx_payer_error_detail', 'tmx_payer_event_type', 'tmx_payer_flash_anomaly', 'tmx_payer_flash_lang', 'tmx_payer_flash_os', 'tmx_payer_flash_system_state', 'tmx_payer_flash_version', 'tmx_payer_fuzzy_device_activities', 'tmx_payer_fuzzy_device_first_seen', 'tmx_payer_fuzzy_device_id', 'tmx_payer_fuzzy_device_id_confidence', 'tmx_payer_fuzzy_device_last_event', 'tmx_payer_fuzzy_device_last_update', 'tmx_payer_fuzzy_device_match_result', 'tmx_payer_fuzzy_device_result', 'tmx_payer_fuzzy_device_score', 'tmx_payer_fuzzy_device_worst_score', 'tmx_payer_headers_name_value_hash', 'tmx_payer_headers_order_string_hash', 'tmx_payer_honeypot_fingerprint', 'tmx_payer_honeypot_fingerprint_check', 'tmx_payer_honeypot_fingerprint_diff', 'tmx_payer_honeypot_fingerprint_match', 
'tmx_payer_honeypot_unknown_diff', 'tmx_payer_http_os_signature', 'tmx_payer_http_referer', 'tmx_payer_http_referer_domain', 'tmx_payer_http_referer_domain_assert_history', 'tmx_payer_http_referer_domain_first_seen', 'tmx_payer_http_referer_domain_last_event', 'tmx_payer_http_referer_domain_last_update', 'tmx_payer_http_referer_domain_result', 'tmx_payer_http_referer_domain_score', 'tmx_payer_http_referer_domain_worst_score', 'tmx_payer_http_referer_url', 'tmx_payer_image_anomaly', 'tmx_payer_image_loaded', 'tmx_payer_js_browser_string', 'tmx_payer_js_fonts_hash', 'tmx_payer_js_fonts_number', 'tmx_payer_js_href_domain', 'tmx_payer_mime_type_hash', 'tmx_payer_mime_type_number', 'tmx_payer_multiple_session_id', 'tmx_payer_org_id', 'tmx_payer_os', 'tmx_payer_os_anomaly', 'tmx_payer_os_fonts_hash', 'tmx_payer_os_fonts_number', 'tmx_payer_page_time_on', 'tmx_payer_plugin_adobe_acrobat', 'tmx_payer_plugin_devalvr', 'tmx_payer_plugin_flash', 'tmx_payer_plugin_hash', 'tmx_payer_plugin_java', 'tmx_payer_plugin_number', 'tmx_payer_plugin_quicktime', 'tmx_payer_plugin_realplayer', 'tmx_payer_plugin_shockwave', 'tmx_payer_plugin_silverlight', 'tmx_payer_plugin_svg_viewer', 'tmx_payer_plugin_vlc_player', 'tmx_payer_plugin_windows_media_player', 'tmx_payer_policy', 'tmx_payer_policy_score', 'tmx_payer_profiled_domain', 'tmx_payer_profiled_domain_first_seen', 'tmx_payer_profiled_domain_last_event', 'tmx_payer_profiled_domain_last_update', 'tmx_payer_profiled_domain_result', 'tmx_payer_profiled_domain_score', 'tmx_payer_profiled_domain_worst_score', 'tmx_payer_profiled_url', 'tmx_payer_profiling_datetime', 'tmx_payer_proxy_ip', 'tmx_payer_proxy_ip_activities', 'tmx_payer_proxy_ip_assert_history', 'tmx_payer_proxy_ip_attributes', 'tmx_payer_proxy_ip_city', 'tmx_payer_proxy_ip_first_seen', 'tmx_payer_proxy_ip_geo', 'tmx_payer_proxy_ip_isp', 'tmx_payer_proxy_ip_last_assertion', 'tmx_payer_proxy_ip_last_event', 'tmx_payer_proxy_ip_last_update', 'tmx_payer_proxy_ip_latitude', 'tmx_payer_proxy_ip_longitude', 'tmx_payer_proxy_ip_organization', 'tmx_payer_proxy_ip_region', 'tmx_payer_proxy_ip_result', 'tmx_payer_proxy_ip_score', 'tmx_payer_proxy_ip_worst_score', 'tmx_payer_proxy_name', 'tmx_payer_proxy_type', 'tmx_payer_reason_code', 'tmx_payer_request_duration', 'tmx_payer_request_id', 'tmx_payer_request_result', 'tmx_payer_review_status', 'tmx_payer_risk_rating', 'tmx_payer_screen_aspect_ratio_anomaly', 'tmx_payer_screen_color_depth', 'tmx_payer_screen_dpi', 'tmx_payer_screen_res', 'tmx_payer_screen_res_anomaly', 'tmx_payer_service_type', 'tmx_payer_session_anomaly', 'tmx_payer_session_id', 'tmx_payer_summary_risk_score', 'tmx_payer_system_state', 'tmx_payer_tcp_os_signature', 'tmx_payer_timezone_offset_anomaly', 'tmx_payer_time_zone', 'tmx_payer_time_zone_dst_offset', 'tmx_payer_tmx_policy_score', 'tmx_payer_tmx_reason_code', 'tmx_payer_tmx_review_status', 'tmx_payer_tmx_risk_rating', 'tmx_payer_tmx_summary_reason_code', 'tmx_payer_transaction_amount', 'tmx_payer_transaction_currency', 'tmx_payer_transaction_id', 'tmx_payer_true_ip', 'tmx_payer_true_ip_activities', 'tmx_payer_true_ip_assert_history', 'tmx_payer_true_ip_attributes', 'tmx_payer_true_ip_city', 'tmx_payer_true_ip_first_seen', 'tmx_payer_true_ip_geo', 'tmx_payer_true_ip_isp', 'tmx_payer_true_ip_last_assertion', 'tmx_payer_true_ip_last_event', 'tmx_payer_true_ip_last_update', 'tmx_payer_true_ip_latitude', 'tmx_payer_true_ip_longitude', 'tmx_payer_true_ip_organization', 'tmx_payer_true_ip_region', 'tmx_payer_true_ip_result', 
'tmx_payer_true_ip_score', 'tmx_payer_true_ip_worst_score', 'tmx_payer_ua_browser', 'tmx_payer_ua_mobile', 'tmx_payer_ua_os', 'tmx_payer_ua_platform', 'tmx_payer_unknown_session', 'tmx_payer_url_anomaly', 'tmx_payer_rs_ind_No reject status on Email - global - month', 'tmx_payer_rs_ind_No reject status on Exact ID - global - month', 'tmx_payer_rs_ind_New Exact ID', 'tmx_payer_rs_ind_Good Exact ID Age', 'tmx_payer_rs_ind_3IPLogins_inaDay', 'tmx_payer_rs_ind_NoDeviceID', 'tmx_payer_rs_ind_Global Email gt 500 dollars - month', 'tmx_payer_rs_ind_Global Exact ID gt 500 dollars - month', 'tmx_payer_rs_ind_3DifferentDeviceIDs_SameAccountEmailID_inaWeek', 'tmx_payer_rs_ind_3DifferentDeviceIDs_SameAccountLogin_inaWeek', 'tmx_payer_rs_ind_3_Emails_per_device', 'tmx_payer_rs_ind_Review Status', 'tmx_payer_rs_ind_Lang Mismatch', 'tmx_payer_rs_ind_Good global persona - month', 'tmx_payer_rs_ind_AccountAddress_Differentfrom_TrueGeo', 'tmx_payer_rs_ind_global email used mlt places -day', 'tmx_payer_rs_ind_ProxyIP_isHidden', 'tmx_payer_rs_ind_ProxyIPAddress_Risky_inReputationNetwork', 'tmx_payer_rs_ind_3DifferentAccountEmailIDs_SameDeviceID_inaDay', 'tmx_payer_rs_ind_3DifferentAccountLogins_SameDeviceID_inaDay', 'tmx_payer_rs_ind_3DifferentAccountEmailIDs_SameDeviceID_inaWeek', 'tmx_payer_rs_ind_3DifferentAccountLogins_SameDeviceID_inaWeek', 'tmx_payer_rs_ind_global device using mlt personas - day', 'tmx_payer_rs_ind_DeviceTrueGEO_Differentfrom_ProxyGeo', 'tmx_payer_rs_ind_Dial-up connection', 'tmx_payer_rs_ind_2Device_Creation_inanHour', 'tmx_payer_rs_ind_ProxyIP_isAnonymous', 'tmx_payer_rs_ind_3DifferentProxyIPs_SameDeviceID_inaDay', 'tmx_payer_rs_ind_PossibleCookieWiping', 'tmx_payer_rs_ind_3DeviceCreation_inaDay', 'merge_key', 'merge_ind', 'tmx_payee_account_address_assert_history', 'tmx_payee_account_address_city', 'tmx_payee_account_address_country', 'tmx_payee_account_address_first_seen', 'tmx_payee_account_address_last_event', 'tmx_payee_account_address_last_update', 'tmx_payee_account_address_result', 'tmx_payee_account_address_score', 'tmx_payee_account_address_state', 'tmx_payee_account_address_street1', 'tmx_payee_account_address_worst_score', 'tmx_payee_account_address_zip', 'tmx_payee_account_email', 'tmx_payee_account_email_activities', 'tmx_payee_account_email_assert_history', 'tmx_payee_account_email_attributes', 'tmx_payee_account_email_first_seen', 'tmx_payee_account_email_last_assertion', 'tmx_payee_account_email_last_event', 'tmx_payee_account_email_last_update', 'tmx_payee_account_email_result', 'tmx_payee_account_email_score', 'tmx_payee_account_email_worst_score', 'tmx_payee_account_login', 'tmx_payee_account_login_assert_history', 'tmx_payee_account_login_first_seen', 'tmx_payee_account_login_last_event', 'tmx_payee_account_login_last_update', 'tmx_payee_account_login_result', 'tmx_payee_account_login_score', 'tmx_payee_account_login_worst_score', 'tmx_payee_account_name', 'tmx_payee_account_name_activities', 'tmx_payee_account_name_assert_history', 'tmx_payee_account_name_attributes', 'tmx_payee_account_name_first_seen', 'tmx_payee_account_name_last_assertion', 'tmx_payee_account_name_last_event', 'tmx_payee_account_name_last_update', 'tmx_payee_account_name_result', 'tmx_payee_account_name_score', 'tmx_payee_account_name_worst_score', 'tmx_payee_agent_type', 'tmx_payee_Array', 'tmx_payee_browser_language', 'tmx_payee_browser_language_anomaly', 'tmx_payee_browser_string', 'tmx_payee_browser_string_anomaly', 'tmx_payee_browser_string_hash', 'tmx_payee_browser_string_mismatch', 
'tmx_payee_cidr_number', 'tmx_payee_css_image_loaded', 'tmx_payee_custom_count_1', 'tmx_payee_custom_count_2', 'tmx_payee_custom_match_1', 'tmx_payee_custom_match_2', 'tmx_payee_custom_match_3', 'tmx_payee_custom_policy_score', 'tmx_payee_detected_fl', 'tmx_payee_device_activities', 'tmx_payee_device_assert_history', 'tmx_payee_device_attributes', 'tmx_payee_device_first_seen', 'tmx_payee_device_id', 'tmx_payee_device_id_confidence', 'tmx_payee_device_last_assertion', 'tmx_payee_device_last_event', 'tmx_payee_device_last_update', 'tmx_payee_device_match_result', 'tmx_payee_device_result', 'tmx_payee_device_score', 'tmx_payee_device_worst_score', 'tmx_payee_dns_ip', 'tmx_payee_dns_ip_city', 'tmx_payee_dns_ip_geo', 'tmx_payee_dns_ip_isp', 'tmx_payee_dns_ip_latitude', 'tmx_payee_dns_ip_longitude', 'tmx_payee_dns_ip_organization', 'tmx_payee_dns_ip_region', 'tmx_payee_enabled_ck', 'tmx_payee_enabled_fl', 'tmx_payee_enabled_im', 'tmx_payee_enabled_js', 'tmx_payee_error_detail', 'tmx_payee_event_type', 'tmx_payee_flash_anomaly', 'tmx_payee_flash_lang', 'tmx_payee_flash_os', 'tmx_payee_flash_system_state', 'tmx_payee_flash_version', 'tmx_payee_fuzzy_device_activities', 'tmx_payee_fuzzy_device_first_seen', 'tmx_payee_fuzzy_device_id', 'tmx_payee_fuzzy_device_id_confidence', 'tmx_payee_fuzzy_device_last_event', 'tmx_payee_fuzzy_device_last_update', 'tmx_payee_fuzzy_device_match_result', 'tmx_payee_fuzzy_device_result', 'tmx_payee_fuzzy_device_score', 'tmx_payee_fuzzy_device_worst_score', 'tmx_payee_headers_name_value_hash', 'tmx_payee_headers_order_string_hash', 'tmx_payee_honeypot_fingerprint', 'tmx_payee_honeypot_fingerprint_check', 'tmx_payee_honeypot_fingerprint_diff', 'tmx_payee_honeypot_fingerprint_match', 'tmx_payee_honeypot_unknown_diff', 'tmx_payee_http_os_signature', 'tmx_payee_http_referer', 'tmx_payee_http_referer_domain', 'tmx_payee_http_referer_domain_assert_history', 'tmx_payee_http_referer_domain_first_seen', 'tmx_payee_http_referer_domain_last_event', 'tmx_payee_http_referer_domain_last_update', 'tmx_payee_http_referer_domain_result', 'tmx_payee_http_referer_domain_score', 'tmx_payee_http_referer_domain_worst_score', 'tmx_payee_http_referer_url', 'tmx_payee_image_anomaly', 'tmx_payee_image_loaded', 'tmx_payee_js_browser_string', 'tmx_payee_js_fonts_hash', 'tmx_payee_js_fonts_number', 'tmx_payee_js_href_domain', 'tmx_payee_mime_type_hash', 'tmx_payee_mime_type_number', 'tmx_payee_multiple_session_id', 'tmx_payee_org_id', 'tmx_payee_os', 'tmx_payee_os_anomaly', 'tmx_payee_os_fonts_hash', 'tmx_payee_os_fonts_number', 'tmx_payee_page_time_on', 'tmx_payee_plugin_adobe_acrobat', 'tmx_payee_plugin_devalvr', 'tmx_payee_plugin_flash', 'tmx_payee_plugin_hash', 'tmx_payee_plugin_java', 'tmx_payee_plugin_number', 'tmx_payee_plugin_quicktime', 'tmx_payee_plugin_realplayer', 'tmx_payee_plugin_shockwave', 'tmx_payee_plugin_silverlight', 'tmx_payee_plugin_svg_viewer', 'tmx_payee_plugin_vlc_player', 'tmx_payee_plugin_windows_media_player', 'tmx_payee_policy', 'tmx_payee_policy_score', 'tmx_payee_profiled_domain', 'tmx_payee_profiled_domain_first_seen', 'tmx_payee_profiled_domain_last_event', 'tmx_payee_profiled_domain_last_update', 'tmx_payee_profiled_domain_result', 'tmx_payee_profiled_domain_score', 'tmx_payee_profiled_domain_worst_score', 'tmx_payee_profiled_url', 'tmx_payee_profiling_datetime', 'tmx_payee_proxy_ip', 'tmx_payee_proxy_ip_activities', 'tmx_payee_proxy_ip_assert_history', 'tmx_payee_proxy_ip_attributes', 'tmx_payee_proxy_ip_city', 'tmx_payee_proxy_ip_first_seen', 
'tmx_payee_proxy_ip_geo', 'tmx_payee_proxy_ip_isp', 'tmx_payee_proxy_ip_last_assertion', 'tmx_payee_proxy_ip_last_event', 'tmx_payee_proxy_ip_last_update', 'tmx_payee_proxy_ip_latitude', 'tmx_payee_proxy_ip_longitude', 'tmx_payee_proxy_ip_organization', 'tmx_payee_proxy_ip_region', 'tmx_payee_proxy_ip_result', 'tmx_payee_proxy_ip_score', 'tmx_payee_proxy_ip_worst_score', 'tmx_payee_proxy_name', 'tmx_payee_proxy_type', 'tmx_payee_reason_code', 'tmx_payee_request_duration', 'tmx_payee_request_id', 'tmx_payee_request_result', 'tmx_payee_review_status', 'tmx_payee_risk_rating', 'tmx_payee_screen_aspect_ratio_anomaly', 'tmx_payee_screen_color_depth', 'tmx_payee_screen_dpi', 'tmx_payee_screen_res', 'tmx_payee_screen_res_anomaly', 'tmx_payee_service_type', 'tmx_payee_session_anomaly', 'tmx_payee_session_id', 'tmx_payee_summary_risk_score', 'tmx_payee_system_state', 'tmx_payee_tcp_os_signature', 'tmx_payee_timezone_offset_anomaly', 'tmx_payee_time_zone', 'tmx_payee_time_zone_dst_offset', 'tmx_payee_tmx_policy_score', 'tmx_payee_tmx_reason_code', 'tmx_payee_tmx_review_status', 'tmx_payee_tmx_risk_rating', 'tmx_payee_tmx_summary_reason_code', 'tmx_payee_transaction_amount', 'tmx_payee_transaction_currency', 'tmx_payee_transaction_id', 'tmx_payee_true_ip', 'tmx_payee_true_ip_activities', 'tmx_payee_true_ip_assert_history', 'tmx_payee_true_ip_attributes', 'tmx_payee_true_ip_city', 'tmx_payee_true_ip_first_seen', 'tmx_payee_true_ip_geo', 'tmx_payee_true_ip_isp', 'tmx_payee_true_ip_last_assertion', 'tmx_payee_true_ip_last_event', 'tmx_payee_true_ip_last_update', 'tmx_payee_true_ip_latitude', 'tmx_payee_true_ip_longitude', 'tmx_payee_true_ip_organization', 'tmx_payee_true_ip_region', 'tmx_payee_true_ip_result', 'tmx_payee_true_ip_score', 'tmx_payee_true_ip_worst_score', 'tmx_payee_ua_browser', 'tmx_payee_ua_mobile', 'tmx_payee_ua_os', 'tmx_payee_ua_platform', 'tmx_payee_unknown_session', 'tmx_payee_url_anomaly', 'tmx_payee_rs_ind_No reject status on Email - global - month', 'tmx_payee_rs_ind_No reject status on Exact ID - global - month', 'tmx_payee_rs_ind_New Exact ID', 'tmx_payee_rs_ind_Good Exact ID Age', 'tmx_payee_rs_ind_3IPLogins_inaDay', 'tmx_payee_rs_ind_NoDeviceID', 'tmx_payee_rs_ind_Global Email gt 500 dollars - month', 'tmx_payee_rs_ind_Global Exact ID gt 500 dollars - month', 'tmx_payee_rs_ind_3DifferentDeviceIDs_SameAccountEmailID_inaWeek', 'tmx_payee_rs_ind_3DifferentDeviceIDs_SameAccountLogin_inaWeek', 'tmx_payee_rs_ind_3_Emails_per_device', 'tmx_payee_rs_ind_Review Status', 'tmx_payee_rs_ind_Lang Mismatch', 'tmx_payee_rs_ind_Good global persona - month', 'tmx_payee_rs_ind_AccountAddress_Differentfrom_TrueGeo', 'tmx_payee_rs_ind_global email used mlt places -day', 'tmx_payee_rs_ind_ProxyIP_isHidden', 'tmx_payee_rs_ind_ProxyIPAddress_Risky_inReputationNetwork', 'tmx_payee_rs_ind_3DifferentAccountEmailIDs_SameDeviceID_inaDay', 'tmx_payee_rs_ind_3DifferentAccountLogins_SameDeviceID_inaDay', 'tmx_payee_rs_ind_3DifferentAccountEmailIDs_SameDeviceID_inaWeek', 'tmx_payee_rs_ind_3DifferentAccountLogins_SameDeviceID_inaWeek', 'tmx_payee_rs_ind_global device using mlt personas - day', 'tmx_payee_rs_ind_DeviceTrueGEO_Differentfrom_ProxyGeo', 'tmx_payee_rs_ind_Dial-up connection', 'tmx_payee_rs_ind_2Device_Creation_inanHour', 'tmx_payee_rs_ind_ProxyIP_isAnonymous', 'tmx_payee_rs_ind_3DifferentProxyIPs_SameDeviceID_inaDay', 'tmx_payee_rs_ind_PossibleCookieWiping', 'tmx_payee_rs_ind_3DeviceCreation_inaDay', 'merge_key', 'merge_ind', 'target', 'lo_tmx_payee_tmx_summary_reason_code', 
'lo_tmx_payer_proxy_ip', 'lo_tmx_payer_enabled_js', 'lo_tmx_payer_screen_dpi', 'lo_tmx_payee_reason_code', 'lo_signal_355', 'lo_tmx_payer_ua_browser', 'lo_tmx_payee_fuzzy_device_result', 'lo_tmx_payee_account_email_result', 'lo_tmx_payee_proxy_ip_city', 'lo_tmx_payer_proxy_ip_last_event', 'lo_tmx_payee_flash_lang', 'lo_tmx_payee_enabled_im', 'lo_tmx_payee_proxy_ip', 'lo_tmx_payee_plugin_realplayer', 'lo_tmx_payee_plugin_adobe_acrobat', 'lo_signal_13', 'lo_tmx_payer_plugin_java', 'lo_tmx_payee_proxy_ip_result', 'lo_tmx_payer_screen_res', 'lo_signal_506', 'lo_tmx_payee_review_status', 'lo_tmx_payer_plugin_shockwave', 'lo_tmx_payee_transaction_amount', 'lo_tmx_payee_fuzzy_device_match_result', 'lo_tmx_payee_proxy_ip_last_assertion', 'lo_tmx_payee_proxy_type', 'lo_tmx_payer_account_login_result', 'lo_tmx_payer_plugin_windows_media_player', 'lo_tmx_payee_screen_dpi', 'lo_tmx_payer_true_ip_region', 'lo_tmx_payer_http_referer_domain_result', 'lo_tmx_payer_honeypot_fingerprint_check', 'lo_tmx_payee_true_ip_organization', 'lo_tmx_payee_account_address_zip', 'lo_tmx_payee_headers_name_value_hash', 'lo_tmx_payee_enabled_fl', 'lo_tmx_payee_account_address_state', 'lo_tmx_payee_tmx_reason_code', 'lo_tmx_payee_plugin_silverlight', 'lo_tmx_payer_request_result', 'lo_tmx_payee_proxy_ip_first_seen', 'lo_tmx_payer_event_type', 'lo_tmx_payer_device_result', 'lo_tmx_payee_device_assert_history', 'lo_tmx_payer_js_browser_string', 'lo_tmx_payee_request_result', 'lo_tmx_payee_proxy_ip_region', 'lo_tmx_payer_true_ip_geo', 'lo_tmx_payee_agent_type', 'lo_tmx_payer_mime_type_hash', 'lo_tmx_payer_proxy_ip_last_update', 'lo_tmx_payer_honeypot_fingerprint_diff', 'lo_tmx_payer_account_address_country', 'lo_tmx_payer_honeypot_fingerprint_match', 'lo_tmx_payer_ua_platform', 'lo_tmx_payee_js_browser_string', 'lo_tmx_payer_plugin_flash', 'lo_tmx_payee_enabled_ck', 'lo_tmx_payer_honeypot_fingerprint', 'lo_tmx_payee_plugin_shockwave', 'lo_tmx_payee_http_referer_domain', 'lo_tmx_payer_proxy_ip_city', 'lo_tmx_payee_account_login_result', 'lo_signal_600', 'lo_tmx_payee_honeypot_fingerprint_match', 'lo_tmx_payee_mime_type_hash', 'lo_tmx_payer_profiled_domain', 'lo_tmx_payer_http_os_signature', 'lo_tmx_payee_device_result', 'lo_tmx_payer_ua_os', 'lo_tmx_payee_account_address_country', 'lo_tmx_payer_account_email_assert_history', 'lo_tmx_payee_os_fonts_hash', 'lo_tmx_payer_device_match_result', 'lo_tmx_payer_proxy_ip_organization', 'lo_tmx_payer_honeypot_unknown_diff', 'lo_tmx_payer_enabled_ck', 'lo_tmx_payer_true_ip_organization', 'lo_tmx_payer_true_ip_city', 'lo_tmx_payer_device_attributes', 'lo_tmx_payee_honeypot_fingerprint', 'lo_signal_156', 'lo_tmx_payee_http_referer', 'lo_tmx_payee_true_ip_region', 'lo_tmx_payer_account_address_city', 'lo_tmx_payee_risk_rating', 'lo_tmx_payer_proxy_ip_geo', 'lo_tmx_payer_proxy_ip_last_assertion', 'lo_tmx_payee_account_name_result', 'lo_tmx_payee_account_email_assert_history', 'lo_tmx_payee_true_ip_geo', 'lo_signal_8', 'lo_tmx_payee_true_ip', 'lo_tmx_payee_account_address_result', 'lo_tmx_payee_headers_order_string_hash', 'lo_signal_2', 'lo_tmx_payee_ua_platform', 'lo_tmx_payer_fuzzy_device_result', 'lo_tmx_payee_proxy_ip_organization', 'lo_tmx_payer_os', 'lo_tmx_payer_enabled_fl', 'lo_signal_548', 'lo_tmx_payee_honeypot_fingerprint_diff', 'lo_signal_547', 'lo_tmx_payee_screen_res', 'lo_tmx_payee_flash_version', 'lo_tmx_payee_proxy_ip_last_event', 'lo_tmx_payer_plugin_silverlight', 'lo_tmx_payer_fuzzy_device_match_result', 'lo_tmx_payer_agent_type', 'lo_tmx_payee_proxy_ip_last_update', 
'lo_tmx_payer_enabled_im', 'lo_tmx_payee_enabled_js', 'lo_tmx_payee_tcp_os_signature', 'lo_tmx_payee_proxy_ip_geo', 'lo_tmx_payee_account_address_city', 'lo_tmx_payer_plugin_adobe_acrobat', 'lo_tmx_payer_account_address_result', 'lo_tmx_payee_honeypot_fingerprint_check', 'lo_tmx_payer_device_assert_history', 'lo_tmx_payee_device_match_result', 'lo_tmx_payer_proxy_ip_result', 'lo_tmx_payer_account_name_result', 'lo_tmx_payer_mime_type_number', 'lo_tmx_payer_plugin_realplayer', 'lo_tmx_payer_headers_order_string_hash', 'lo_tmx_payee_true_ip_result', 'lo_tmx_payee_plugin_hash', 'lo_tmx_payee_os', 'lo_tmx_payee_honeypot_unknown_diff', 'lo_tmx_payer_flash_os', 'lo_tmx_payee_event_type', 'lo_tmx_payee_flash_os', 'lo_tmx_payee_http_referer_domain_result', 'lo_tmx_payee_plugin_windows_media_player', 'lo_tmx_payer_transaction_amount', 'lo_tmx_payee_profiled_domain', 'lo_tmx_payer_http_referer', 'lo_tmx_payer_account_email_attributes', 'lo_tmx_payer_account_email_result', 'lo_tmx_payer_flash_version', 'lo_tmx_payee_http_os_signature', 'lo_tmx_payer_risk_rating', 'lo_tmx_payee_account_email_attributes', 'lo_tmx_payer_http_referer_domain', 'lo_tmx_payer_account_address_state', 'lo_tmx_payee_mime_type_number', 'lo_tmx_payer_proxy_ip_isp', 'lo_signal_166', 'lo_tmx_payer_reason_code', 'lo_tmx_payee_device_attributes', 'lo_tmx_payer_account_address_zip', 'lo_tmx_payer_browser_language', 'lo_tmx_payer_plugin_quicktime', 'lo_tmx_payee_true_ip_isp', 'lo_tmx_payer_true_ip_result', 'lo_tmx_payer_true_ip_isp', 'lo_tmx_payee_true_ip_city', 'lo_tmx_payer_proxy_ip_first_seen', 'lo_tmx_payee_plugin_flash', 'lo_tmx_payee_browser_language', 'lo_tmx_payer_tcp_os_signature', 'lo_tmx_payee_plugin_java', 'lo_tmx_payer_review_status', 'lo_tmx_payer_flash_lang', 'lo_tmx_payee_plugin_quicktime', 'lo_tmx_payee_ua_os', 'lo_signal_177', 'lo_tmx_payee_ua_browser', 'lo_tmx_payee_proxy_ip_isp', 'lo_tmx_payer_proxy_ip_region', 'lo_tmx_payer_proxy_type']
| [
"[email protected]"
] | |
eb05a439ca3e8cb84a9dd47e7476548ed343a980 | 5db75589901eb5eb991d8efd5aa043fbd475179c | /leetcode/strStr.py | 6af411d87627e6fa6c68306c8bebf86d0cfe9c05 | [] | no_license | cizixs/playground | b15f503ddb7a406791443768b6325776c9fe6f22 | eea3668cc80d7f328359a56144a5344029e83b47 | refs/heads/master | 2021-01-22T02:53:09.981444 | 2014-10-26T07:19:17 | 2014-10-26T07:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | class Solution:
# @param haystack, a string
# @param needle, a string
# @return a string or None
def strStr(self, haystack, needle):
if len(haystack) < len(needle):
return None
if needle == "":
return haystack
result = haystack.find(needle)
if result < 0:
return None
return haystack[result:]
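
# Hypothetical quick check of the method above (inputs are illustrative):
if __name__ == "__main__":
    s = Solution()
    print(s.strStr("hello world", "world"))  # -> "world"
    print(s.strStr("hello", "xyz"))          # -> None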
| [
"vagrant@precise64.(none)"
] | vagrant@precise64.(none) |
ed08064f64c5c4407c7c2f69b59101480452a297 | fa795af74cda4d92604fa3332179ba939460a9b5 | /JUBioactivities/QSARDB/Zhang_Property_logKMXa_Cuticular_polymer_matrix/__init__.py | adddf136cd274fcf5c3c41ccd4a71b4f7ab2e83c | [] | no_license | JenniferHemmerich/JUBioactivities | 7329a89db0e2790aff9bcfe153ab4dcd2c19a489 | 87054ac135d91e034dcfb6028562b4a7930a3433 | refs/heads/master | 2020-04-26T03:56:36.177955 | 2019-03-07T13:08:08 | 2019-03-07T13:08:08 | 173,284,341 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import os.path
import pandas as pd
from ... import utils
import glob
__data_src__ = [os.path.join(__path__[0], "compounds/InChI_from_XML.csv")]
__data_src__ += list(sorted(glob.glob(os.path.join(__path__[0], "properties/*.txt"))))
def read_data(raw=False):
dat = pd.read_csv(__data_src__[0], index_col=0)
prop = pd.read_csv(__data_src__[1], index_col=0, sep="\t")
prop.columns = ['logKMXa_Zhang']
df = pd.concat([dat, prop], axis=1)
df = df.set_index('InchiCode')
df = utils.index_from_inchicode(df)
if raw:
return df
df = utils.drop_rows(df)
df = utils.handle_duplicates(df, type='cont')
return df
def read_structures(raw=False):
df = pd.read_csv(__data_src__[0], index_col=1).drop('ID', axis=1)
df = utils.index_from_inchicode(df, smiles=True)
if raw:
return df
df = utils.drop_rows(df)
df = utils.handle_duplicates(df, type='str')
return df
| [
"[email protected]"
] | |
7c94c4246337922503c3c04bb85a88472f19f303 | 7caa2803db67f5c609865fe0f5c4d24bbbdb4afe | /leetcode/621-task-scheduler/main.py | ce57a5a949b68ada661a286438c646a3b5b02b77 | [] | no_license | ataul443/AlgoDaily | 106fd9e496ede30bfdf223ce54dcac2b14852815 | b8ae4f80cf162681aaff1ff8ed6e1e4d05f2010d | refs/heads/master | 2022-11-20T10:47:52.683016 | 2020-07-16T13:27:55 | 2020-07-16T13:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | import heapq
from collections import *
"""
1st approach: maxheap
- similar to lc358
1. count occurence for each task
2. in each iteration
- pop the tasks from maxheap n+1 times
- put the tasks back to the queue with decremented count
3. remove trailing '-'(idle)
Time O(nlog26) -> O(n)
Space O(n)
744 ms, faster than 26.51%
"""
class Solution(object):
def leastInterval(self, tasks, n):
"""
:type tasks: List[str]
:type n: int
:rtype: int
"""
# count occurence for each task
counter = Counter(tasks)
pq = []
for key in counter:
heapq.heappush(pq, (-counter[key], key))
res = []
while len(pq) > 0:
arr = []
# pop the tasks from maxheap
for i in range(n+1):
if len(pq) > 0:
pop = heapq.heappop(pq)
res.append(pop[1])
arr.append(pop)
else:
res.append("-")
# put the tasks back to the queue with decremented count
for count, key in arr:
if abs(count) > 1:
heapq.heappush(pq, (count+1, key))
# remove trailing '-'(idle)
while len(res) > 0:
if res[-1] == '-':
res.pop()
else:
break
# res is the list of tasks
return len(res)
print(Solution().leastInterval(["A", "A", "A", "B", "B", "B"], 2))
print(Solution().leastInterval(["A", "A", "A", "B", "B", "B", "C", "C"], 2))
print(Solution().leastInterval(
["A", "A", "A", "A", "A", "B", "B", "B", "C", "C", "C", "D", "D"], 2))
| [
"[email protected]"
] | |
06c0697c042852553ea8f2603afaca223ce2c5c1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_81/311.py | d316cf2c5a48f65d437f14d31ee4f5b5c78d65a5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | def crash():
assert 1==2
def percentage(arrayWins):
wins = arrayWins.count('1')
losses = arrayWins.count('0')
return float(wins)/(wins+losses)
def average(arrayWins):
return float(sum(arrayWins))/len(arrayWins)
fileLoc = '/Users/alarobric/Downloads/'
#fileLoc += 'A-small-attempt0'
#fileLoc += 'A-test'
fileLoc += 'A-large'
f = open(fileLoc+'.in', 'r')
g = open(fileLoc+'.out', 'w')
cases = int(f.readline())
for i in range (1, cases + 1):
N = int(f.readline())
#N teams
winPerc = []
winPercAgainst = [[] for j in range(N)]
owp = []
oowp = []
rpi = []
wins = []
for j in range(N):
line = [c for c in f.readline().strip()]
wins.append(line)
#print wins
for k, teamWins in enumerate(wins):
#print teamWins
winPerc.append(percentage(teamWins))
for j in range(N):
if teamWins[j] == '.':
winPercAgainst[j].append('.')
else:
tempTeamWins = teamWins[:]
tempTeamWins.pop(j)
#print "k", k, j, tempTeamWins, percentage(tempTeamWins)
winPercAgainst[j].append(percentage(tempTeamWins))
for winPercAgainstSub in winPercAgainst:
#print "a", winPercAgainstSub
for bob in range(winPercAgainstSub.count('.')):
winPercAgainstSub.remove('.')
owp.append(average(winPercAgainstSub))
for k, teamWins in enumerate(wins):
oowpTmp = []
for j in range(N):
if teamWins[j] == '1' or teamWins[j] == '0':
oowpTmp.append(owp[j])
oowp.append(average(oowpTmp))
rpi.append(0.25 * winPerc[k] + 0.50 * owp[k] + 0.25 * oowp[k])
#print "end"
#print winPerc
#print owp
#print oowp
#print rpi
output = "Case #" + str(i) + ": " + "\n"
for k in range(N):
output += str(rpi[k]) + "\n"
print output
g.write(output)
#2
#3
#.10
#0.1
#10.
#4
#.11.
#0.00
#01.1
#.10. | [
"[email protected]"
] | |
b02b6f4a8dad54e71517c5a53c23626326299904 | 1a36f8f77ca3d3093b51932f8df15b3c2fad3ae4 | /mlx/filesystem/local_filesystem.py | 599111e0fe82594c1cd126f3a05558c81109f986 | [
"Apache-2.0"
] | permissive | lewfish/mlx | 3c2477aa2fc53f5522e4e70975f0e430090c3024 | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | refs/heads/master | 2020-04-20T13:57:36.831202 | 2019-10-08T00:37:55 | 2019-10-08T00:37:55 | 168,884,084 | 0 | 0 | Apache-2.0 | 2019-10-08T00:37:56 | 2019-02-02T22:13:28 | Jupyter Notebook | UTF-8 | Python | false | false | 4,068 | py | import os
import shutil
from datetime import datetime, timezone
import glob
from mlx.filesystem import (FileSystem, NotReadableError)
def make_dir(path, check_empty=False, force_empty=False, use_dirname=False):
"""Make a local directory.
Args:
path: path to directory
check_empty: if True, check that directory is empty
force_empty: if True, delete files if necessary to make directory
empty
        use_dirname: if path is a file, use the parent directory as path
Raises:
ValueError if check_empty is True and directory is not empty
"""
directory = path
if use_dirname:
directory = os.path.abspath(os.path.dirname(path))
if force_empty and os.path.isdir(directory):
shutil.rmtree(directory)
os.makedirs(directory, exist_ok=True)
if check_empty and any(os.scandir(directory)):
raise ValueError(
'{} needs to be an empty directory!'.format(directory))
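
# Illustrative call (the path is a placeholder): ensure the parent directory
# of an output file exists before writing to it:
#
#   make_dir('/tmp/demo/output.json', use_dirname=True)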
class LocalFileSystem(FileSystem):
@staticmethod
def matches_uri(uri: str, mode: str) -> bool:
return True
@staticmethod
def file_exists(uri: str, include_dir: bool = True) -> bool:
return (os.path.isfile(uri) or (include_dir and os.path.isdir(uri)))
@staticmethod
def read_str(file_uri: str) -> str:
if not os.path.isfile(file_uri):
raise NotReadableError('Could not read {}'.format(file_uri))
with open(file_uri, 'r') as file_buffer:
return file_buffer.read()
@staticmethod
def read_bytes(file_uri: str) -> bytes:
if not os.path.isfile(file_uri):
raise NotReadableError('Could not read {}'.format(file_uri))
with open(file_uri, 'rb') as file_buffer:
return file_buffer.read()
@staticmethod
def write_str(file_uri: str, data: str) -> None:
make_dir(file_uri, use_dirname=True)
with open(file_uri, 'w') as content_file:
content_file.write(data)
@staticmethod
def write_bytes(file_uri: str, data: bytes) -> None:
make_dir(file_uri, use_dirname=True)
with open(file_uri, 'wb') as content_file:
content_file.write(data)
@staticmethod
def sync_from_dir(src_dir_uri: str,
dest_dir_uri: str,
delete: bool = False) -> None:
if src_dir_uri == dest_dir_uri:
return
if delete:
shutil.rmtree(dest_dir_uri)
# https://stackoverflow.com/a/15824216/841563
def recursive_overwrite(src, dest):
if os.path.isdir(src):
if not os.path.isdir(dest):
os.makedirs(dest)
for entry in os.scandir(src):
recursive_overwrite(entry.path,
os.path.join(dest, entry.name))
else:
shutil.copyfile(src, dest)
recursive_overwrite(src_dir_uri, dest_dir_uri)
@staticmethod
def sync_to_dir(src_dir_uri: str, dest_dir_uri: str,
delete: bool = False) -> None:
LocalFileSystem.sync_from_dir(src_dir_uri, dest_dir_uri, delete)
@staticmethod
def copy_to(src_path: str, dst_uri: str) -> None:
if src_path != dst_uri:
make_dir(dst_uri, use_dirname=True)
shutil.copyfile(src_path, dst_uri)
@staticmethod
def copy_from(uri: str, path: str) -> None:
        # For the local filesystem the URI already is the path, so nothing is
        # copied; we only verify that the file exists.
        not_found = not os.path.isfile(path)
if not_found:
raise NotReadableError('Could not read {}'.format(uri))
@staticmethod
    def local_path(uri: str, download_dir: str) -> str:
        # Local URIs are already filesystem paths; download_dir is unused here.
        path = uri
return path
@staticmethod
def last_modified(uri: str) -> datetime:
local_last_modified = datetime.utcfromtimestamp(os.path.getmtime(uri))
return local_last_modified.replace(tzinfo=timezone.utc)
@staticmethod
def list_paths(uri, ext=None):
if ext is None:
ext = ''
return glob.glob(os.path.join(uri, '*' + ext))
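
# Hedged usage sketch of the class above (paths are placeholders):
#
#   LocalFileSystem.write_str('/tmp/demo/a.txt', 'hello')
#   LocalFileSystem.read_str('/tmp/demo/a.txt')     # -> 'hello'
#   LocalFileSystem.list_paths('/tmp/demo', '.txt') # -> ['/tmp/demo/a.txt']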
| [
"[email protected]"
] | |
84d16dc699f540476901eb0935eba31f39b44c87 | cace862c1d95f6b85a9750a427063a8b0e5ed49c | /binaryapi/ws/chanels/statement.py | 1feab178b3ce9eabd3e3223fa3549dc1c82fe4f4 | [] | no_license | HyeongD/binaryapi | 65486532389210f1ca83f6f2098276ecf984702b | e8daa229c04de712242e8e9b79be3b774b409e35 | refs/heads/master | 2023-08-29T13:24:58.364810 | 2021-10-26T19:00:59 | 2021-10-26T19:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | """Module for Binary statement websocket channel."""
from binaryapi.ws.chanels.base import Base
from typing import Any, Optional, Union
from decimal import Decimal
# https://developers.binary.com/api/#statement
class Statement(Base):
"""Class for Binary statement websocket channel."""
name = "statement"
def __call__(self, action_type: Optional[str] = None, date_from: Optional[int] = None, date_to: Optional[int] = None, description: Optional[int] = None, limit: Optional[Union[int, float, Decimal]] = None, offset: Optional[Union[int, float, Decimal]] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):
"""Method to send message to statement websocket channel.
Statement (request)
Retrieve a summary of account transactions, according to given search criteria
:param action_type: [Optional] To filter the statement according to the type of transaction.
:type action_type: Optional[str]
:param date_from: [Optional] Start date (epoch)
:type date_from: Optional[int]
:param date_to: [Optional] End date (epoch)
:type date_to: Optional[int]
:param description: [Optional] If set to 1, will return full contracts description.
:type description: Optional[int]
:param limit: [Optional] Maximum number of transactions to receive.
:type limit: Optional[Union[int, float, Decimal]]
:param offset: [Optional] Number of transactions to skip.
:type offset: Optional[Union[int, float, Decimal]]
:param passthrough: [Optional] Used to pass data through the websocket, which may be retrieved via the `echo_req` output field.
:type passthrough: Optional[Any]
:param req_id: [Optional] Used to map request to response.
:type req_id: Optional[int]
"""
data = {
"statement": int(1)
}
if action_type:
data['action_type'] = str(action_type)
if date_from:
data['date_from'] = int(date_from)
if date_to:
data['date_to'] = int(date_to)
if description:
data['description'] = int(description)
        # "is not None" keeps an explicit limit/offset of 0 from being dropped
        if limit is not None:
            data['limit'] = limit
        if offset is not None:
            data['offset'] = offset
return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)
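
# Illustrative payload assembled by __call__ above for, e.g.,
# statement(limit=10, offset=0, description=1):
#
#   {"statement": 1, "description": 1, "limit": 10, "offset": 0}
#
# passthrough/req_id are handed to send_websocket_request, which is assumed
# to merge them into the outgoing message.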
| [
"[email protected]"
] | |
b95757190f75c244f98cde51fd55a1f6010f586e | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/ecf3f471c55ae18f8a0bbf4c7170104a63eeceb8-<solve>-bug.py | e8fa2b2c9ec86842a0502cfe3dd6c00fb1c8305d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,350 | py | def solve(self):
'\n Runs the DifferentialEvolutionSolver.\n\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a ``OptimizeResult`` object.\n Important attributes are: ``x`` the solution array, ``success`` a\n Boolean flag indicating if the optimizer exited successfully and\n ``message`` which describes the cause of the termination. See\n `OptimizeResult` for a description of other attributes. If `polish`\n was employed, and a lower minimum was obtained by the polishing,\n then OptimizeResult also contains the ``jac`` attribute.\n '
(nit, warning_flag) = (0, False)
status_message = _status_message['success']
if np.all(np.isinf(self.population_energies)):
self._calculate_population_energies()
for nit in range(1, (self.maxiter + 1)):
try:
next(self)
except StopIteration:
warning_flag = True
status_message = _status_message['maxfev']
break
if self.disp:
print(('differential_evolution step %d: f(x)= %g' % (nit, self.population_energies[0])))
convergence = self.convergence
if (self.callback and (self.callback(self._scale_parameters(self.population[0]), convergence=(self.tol / convergence)) is True)):
warning_flag = True
status_message = 'callback function requested stop early by returning True'
break
if ((convergence < self.tol) or warning_flag):
break
else:
status_message = _status_message['maxiter']
warning_flag = True
DE_result = OptimizeResult(x=self.x, fun=self.population_energies[0], nfev=self._nfev, nit=nit, message=status_message, success=(warning_flag is not True))
if self.polish:
result = minimize(self.func, np.copy(DE_result.x), method='L-BFGS-B', bounds=self.limits.T, args=self.args)
self._nfev += result.nfev
DE_result.nfev = self._nfev
if (result.fun < DE_result.fun):
DE_result.fun = result.fun
DE_result.x = result.x
DE_result.jac = result.jac
self.population_energies[0] = result.fun
self.population[0] = self._unscale_parameters(result.x)
return DE_result | [
"[email protected]"
] | |
d080d3e8d1b6f4511ea71577ee373dcddf86faf3 | 559e6745868358da529c2916180edc90b97f852a | /tests/test_connection_serial.py | b455ad3694f789772efba6de4bee9a93bb5f2feb | [
"MIT"
] | permissive | SwiftyMorgan/msl-equipment | 712290579e49682337548f8c4294907e9b56d5a3 | 56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c | refs/heads/master | 2020-04-21T14:49:23.902185 | 2019-01-18T18:43:16 | 2019-01-18T18:43:16 | 169,647,690 | 0 | 0 | MIT | 2019-02-07T22:08:37 | 2019-02-07T21:46:56 | Python | UTF-8 | Python | false | false | 1,958 | py | import os
import time
import threading
try:
import pty
except ImportError:
pty = None
import pytest
from msl.equipment import EquipmentRecord, ConnectionRecord, Backend, MSLConnectionError
@pytest.mark.skipif(pty is None, reason='pty is not available')
def test_connection_serial_read():
term = b'\r\n'
def echo_server(port):
while True:
data = bytearray()
while not data.endswith(term):
data.extend(os.read(port, 1))
if data.startswith(b'SHUTDOWN'):
break
os.write(port, data)
# simulate a Serial port
master, slave = pty.openpty()
thread = threading.Thread(target=echo_server, args=(master,))
thread.start()
time.sleep(0.5) # allow some time for the echo server to start
record = EquipmentRecord(
connection=ConnectionRecord(
address='ASRL::' + os.ttyname(slave),
backend=Backend.MSL,
properties={
'read_termination': term,
'write_termination': term,
'timeout': 25,
},
)
)
dev = record.connect()
assert dev.read_termination == term
assert dev.write_termination == term
dev.write('hello')
assert dev.read() == 'hello'
n = dev.write('hello')
assert dev.read(n) == 'hello' + term.decode()
dev.write('x'*4096)
assert dev.read() == 'x'*4096
n = dev.write('123.456')
with pytest.raises(MSLConnectionError):
dev.read(n+1)
with pytest.raises(MSLConnectionError):
dev.read(dev.max_read_size+1) # requesting more bytes than are maximally allowed
msg = 'a' * (dev.max_read_size - len(term))
dev.write(msg)
assert dev.read() == msg
dev.write(b'021.3' + term + b',054.2')
assert dev.read() == '021.3' # read until first `term`
assert dev.read() == ',054.2' # read until second `term`
dev.write('SHUTDOWN')
| [
"[email protected]"
] | |
fd8ba7457853c480f4536bb86ddc6b051a090e0a | 779291cb83ec3cab36d8bb66ed46b3afd4907f95 | /notebook/2020-02-25_gene_dists.py | fb81557f52dcb9ae4b74edaec0aa9f2c2a93caf2 | [] | no_license | Shengqian95/ncbi_remap | ac3258411fda8e9317f3cdf951cc909cc0f1946e | 3f2099058bce5d1670a672a69c13efd89d538cd1 | refs/heads/master | 2023-05-22T06:17:57.900135 | 2020-11-01T17:16:54 | 2020-11-01T17:16:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | # %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ncbi_remap import plotting
# %%
plt.style.use(("sra_talk", "sra"))
plt.style.use("sra")
# %%
gene_metadata = (
pd.read_feather(
"../../larval_gonad/references/gene_annotation_dmel_r6-26.feather",
columns=["FBgn", "gene_symbol"],
)
.set_index("FBgn")
.squeeze()
)
symbol2fbgn = {v: k for k, v in gene_metadata.items()}
# %%
gene_expression = pd.read_csv("../output/agg-rnaseq-wf/tpm_gene_counts.tsv", sep="\t", index_col=0)
# %%
def zscore(x):
return (x - x.mean()) / x.std()
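
# Quick check of the formula above: for pd.Series([1, 2, 3]) the mean is 2 and
# the (sample) standard deviation is 1, so zscore(...) -> [-1.0, 0.0, 1.0].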
# %%
dsx = zscore(gene_expression[symbol2fbgn["dsx"]]).rename("dsx")
pd.cut(dsx, bins=4, labels=["low", "low-mid", "mid-high", "high"]).value_counts().map(lambda x: f"{x:,}").to_frame()
# %%
pd.cut(dsx, bins=4, labels=["low", "low-mid", "mid-high", "high"]).pipe(lambda x: x[x == "high"]).index.tolist()
# %%
ax = sns.kdeplot(dsx)
ax.legend_.remove()
ax.set(ylabel="Density", xlabel="Z-score (TPM)")
sns.despine(ax=ax, left=True, right=True)
ax.set_title("dsx", fontstyle="italic") | [
"[email protected]"
] | |
f6090b4123803ea0f46d3530ec0f174c8b4fa349 | c4e9b3e5686ed8c6e885aa9f6a72a571f4b33db6 | /matplotlib_study/multiplot.py | 1d61e196cd0bb3e007ee0e6d62a3ce621c044f01 | [] | no_license | WhiteCri/learn-advanced-python | c2a081db0f901bb76e470341497014b4384ba803 | 8111cb12e8b05a9168a0236e05f4a6a1cda255eb | refs/heads/master | 2023-04-13T22:55:49.060522 | 2021-04-28T05:00:49 | 2021-04-28T05:00:49 | 345,628,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-3, 3, 30)
y = x**2
plt.plot(x, y)
plt.show()
x = np.linspace(-3, 3, 30)
y = x ** 2
plt.plot(x, y, 'r.')
plt.show()
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x), 'r-')
plt.plot(x, -np.sin(x), 'g--')
plt.show() | [
"[email protected]"
] | |
a64f61789af45f794ceb817f356e38ac2bcfa265 | 12a6f15a0ed1af285459ea61707984c0a4fd368d | /code/python_utilities_py3/irrigation_hash_control_py3.py | 24d9d2e7bcde5e8236fe9d6b2f1f0b511c9663d0 | [
"MIT"
] | permissive | NanoDataCenter/nano_data_center | 66ec84ef8f78a6d35238bd18c71bb774b38b37db | 76ad521e1a5139a37df80214af1413d2fd4ade60 | refs/heads/master | 2022-12-12T13:58:04.761671 | 2020-09-18T17:33:32 | 2020-09-18T17:33:32 | 119,457,568 | 2 | 2 | MIT | 2022-12-06T19:37:49 | 2018-01-29T23:58:26 | C | UTF-8 | Python | false | false | 8,570 | py |
import time
import redis
import json
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from redis_support_py3.graph_query_support_py3 import Query_Support
class Generate_Hash_Control_Handler():
def __init__(self,redis_site_data):
qs = Query_Support( redis_server_ip = redis_site_data["host"], redis_server_port=redis_site_data["port"] )
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site_data["site"] )
query_list = qs.add_match_terminal( query_list,
relationship = "PACKAGE", property_mask={"name":"IRRIGATION_CONTROL_MANAGEMENT"} )
package_sets, package_sources = qs.match_list(query_list)
package = package_sources[0]
data_structures = package["data_structures"]
generate_handlers = Generate_Handlers(package,redis_site_data)
self.handler = generate_handlers.construct_hash(data_structures["IRRIGATION_CONTROL"])
self.access_handler = {}
self.access_handler["RAIN_FLAG"] = self.set_rain_flag
self.access_handler["ETO_MANAGEMENT"] = self.set_eto_management_flag
self.access_handler["FLOW_CUT_OFF"] = self.set_flow_cutoff
self.access_handler["CLEANING_INTERVAL"] = self.set_cleaning_interval
self.access_handler["MASTER_VALVE"] = self.set_master_valve
self.access_handler["CLEANING_VALVE"] = self.set_cleaning_valve
self.access_handler["MASTER_VALVE_SETUP"] = self.set_master_valve_setup
self.access_handler["SCHEDULE_NAME"] = self.set_schedule_name
self.access_handler["STEP"] = self.set_step_number
self.access_handler["RUN_TIME"] = self.set_run_time
self.access_handler["ELASPED_TIME"] = self.set_elasped_time
self.access_handler["TIME_STAMP"] = self.set_time_stamp
self.access_handler["SUSPEND"] = self.set_suspend
    def get_redis_handle(self):
        # Assumption: the intended handle is the hash handler constructed in
        # __init__; no other redis handle is ever assigned on this object.
        return self.handler
def get_all(self):
return self.handler.hgetall()
    def set_field(self, field, data):
        try:
            setter = self.access_handler[field]
        except KeyError:
            raise ValueError("Unrecognized field " + field)
        setter(data)
def clear_json_object(self):
self.set_field("SCHEDULE_NAME","OFFLINE")
self.set_field("STEP",0)
self.set_field("RUN_TIME",0)
self.set_field("ELASPED_TIME",0)
def update_json_object(self,json_object):
self.set_field("SCHEDULE_NAME",json_object["schedule_name"])
self.set_field("STEP",json_object["step"])
try:
self.set_field("RUN_TIME",json_object["run_time"])
except:
self.set_field("RUN_TIME",0)
try:
self.set_field("ELASPED_TIME",json_object["elasped_time"])
except:
self.set_field("ELASPED_TIME",0)
def get_rain_flag(self):
temp = self.handler.hget("RAIN_FLAG")
if (temp == 0) or (temp == 1 ):
return temp
self.handler.hset("RAIN_FLAG",0)
return 0
def set_rain_flag(self,data):
data = int(data)
if (data == 0) or (data == 1 ):
self.handler.hset("RAIN_FLAG",data)
else:
self.handler.hset("RAIN_FLAG",0)
return
def get_eto_management_flag(self):
temp = self.handler.hget("ETO_MANAGEMENT")
if (temp == 0) or (temp == 1 ):
return temp
self.handler.hset("ETO_MANAGEMENT",1)
return 0
def set_eto_management_flag(self,data):
data = int(data)
if (data == 0) or (data == 1 ):
self.handler.hset("ETO_MANAGEMENT",data)
else:
self.handler.hset("ETO_MANAGEMENT",1)
return
def get_flow_cutoff(self):
data = self.handler.hget("FLOW_CUT_OFF")
try:
data = float(data)
except:
data = 30
self.handler.hset("FLOW_CUT_OFF",data)
return data
def set_flow_cutoff(self,data):
try:
data = float(data)
except:
data = 30
self.handler.hset("FLOW_CUT_OFF",data)
def get_cleaning_interval(self):
data = self.handler.hget("CLEANING_INTERVAL")
try:
data = float(data)
except:
data = 15000
self.handler.hset("CLEANING_INTERVAL",data)
return data
def set_cleaning_interval(self,data):
try:
data = float(data)
except:
data = 15000
self.handler.hset("CLEANING_INTERVAL",data)
def get_master_valve(self):
temp = self.handler.hget("MASTER_VALVE")
if (temp == 0) or (temp == 1 ):
return temp
self.handler.hset("MASTER_VALVE",0)
return 0
def set_master_valve(self,data):
data = int(data)
if (data == 0) or (data == 1 ):
self.handler.hset("MASTER_VALVE",data)
else:
self.handler.hset("MASTER_VALVE",0)
return
def get_cleaning_valve(self):
temp = self.handler.hget("CLEANING_VALVE")
if (temp == 0) or (temp == 1 ):
return temp
self.handler.hset("CLEANING_VALVE",0)
return 0
def set_cleaning_valve(self,data):
data = int(data)
if (data == 0) or (data == 1 ):
self.handler.hset("CLEANING_VALVE",data)
else:
self.handler.hset("CLEANING_VALVE",0)
return
def get_master_valve_setup(self):
temp = self.handler.hget("MASTER_VALVE_SETUP")
if (temp == 0) or (temp == 1 ):
return temp
self.handler.hset("MASTER_VALVE_SETUP",0)
return 0
def set_master_valve_setup(self,data):
data = int(data)
if (data == 0) or (data == 1 ):
self.handler.hset("MASTER_VALVE_SETUP",data)
else:
self.handler.hset("MASTER_VALVE_SETUP",0)
return
def get_run_time(self):
temp = self.handler.hget("RUN_TIME")
try:
temp = int(temp)
except:
temp = 0
self.handler.hset("RUN_TIME",temp)
return temp
def set_run_time(self,data):
data = int(data)
self.handler.hset("RUN_TIME",data)
return
def get_step_number(self):
temp = self.handler.hget("STEP")
try:
temp = int(temp)
except:
temp = 0
self.handler.hset("STEP",temp)
return temp
def set_step_number(self,data):
data = int(data)
self.handler.hset("STEP",data)
return
def get_schedule_name(self):
temp = self.handler.hget("SCHEDULE_NAME")
if temp != None:
return str(temp)
else:
return ""
def set_schedule_name(self,data):
if data != None:
self.handler.hset("SCHEDULE_NAME",str(data))
else:
self.handler.hset("SCHEDULE_NAME","OFFLINE")
return
def get_elasped_time(self):
temp = self.handler.hget("STEP")
try:
temp = int(temp)
except:
temp = 0
self.handler.hset("ELASPED_TIME",temp)
return temp
def set_elasped_time(self,data):
data = int(data)
self.handler.hset("ELASPED_TIME",data)
return
def get_time_stamp(self):
temp = self.handler.hget("TIME_STAMP")
try:
temp = float(temp)
except:
temp = time.time()
self.handler.hset("TIME_STAMP",temp)
return temp
def set_time_stamp(self,data):
data = float(data)
self.handler.hset("TIME_STAMP",data)
return
def get_suspend(self):
temp = self.handler.hget("SUSPEND")
if (temp == 0) or (temp == 1 ):
return temp
self.handler.hset("SUSPEND",0)
return 0
def set_suspend(self,data):
data = int(data)
if (data == 0) or (data == 1 ):
self.handler.hset("SUSPEND",data)
else:
self.handler.hset("SUSPEND",0)
return
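
# Hedged usage sketch; the dict fields follow what __init__ reads above and
# all values are placeholders. A reachable redis instance populated with the
# IRRIGATION_CONTROL_MANAGEMENT package graph is assumed:
#
#   site = {"host": "localhost", "port": 6379, "site": "MY_SITE"}
#   handler = Generate_Hash_Control_Handler(site)
#   handler.set_field("RAIN_FLAG", 1)
#   print(handler.get_rain_flag())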
| [
"[email protected]"
] | |
5e6416fa20b7c75266a35c0f033ba4e3ad7dab6e | 20a0bd0a9675f52d4cbd100ee52f0f639fb552ef | /config/urls/admin.py | aef939fc4dc34e06fb39b702a80393a1c2a7734d | [] | no_license | yx20och/bods | 2f7d70057ee9f21565df106ef28dc2c4687dfdc9 | 4e147829500a85dd1822e94a375f24e304f67a98 | refs/heads/main | 2023-08-02T21:23:06.066134 | 2021-10-06T16:49:43 | 2021-10-06T16:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,837 | py | from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from transit_odp.common.utils.custom_error_handlers import (
page_not_found,
permission_denied,
)
from transit_odp.common.views import ComingSoonView
from transit_odp.site_admin.urls import (
account_paths,
agent_paths,
consumer_paths,
metrics_paths,
organisation_paths,
)
from transit_odp.site_admin.views import AdminHomeView
from transit_odp.users.views.auth import InviteOnlySignupView
urlpatterns = [
path("", AdminHomeView.as_view(), name="home"),
path(
"metrics/",
include(metrics_paths),
),
path("coming_soon/", ComingSoonView.as_view(), name="placeholder"),
path(
"",
include(
(
[
path("consumers/", include(consumer_paths)),
path("organisations/", include(organisation_paths)),
path("agents/", include(agent_paths)),
# Put account routes here so they share the users namespace
path("account/", include(account_paths)),
],
"users",
),
# Note need to add users namespace to be compatible with other service
namespace="users",
),
),
path(
"account/",
include(
[
# override signup view with invited only signup page
path(
"signup/",
view=InviteOnlySignupView.as_view(),
name="account_signup",
),
path("", include("config.urls.allauth")),
]
),
),
path("invitations/", include("config.urls.invitations", namespace="invitations")),
# Django Admin, use {% url 'admin:index' %}
# TODO - host route on Admin service
path(settings.ADMIN_URL, admin.site.urls),
]
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| [
"[email protected]"
] | |
6765e6eccaa7845dd1dfbcce37ca3e18dfc8895e | 8f0c5cb4938cffb0fb931c9bed6ce3e74d63e342 | /trydjango/settings.py | 932475114b7fe2005eb83556f701b6668ca2e373 | [] | no_license | Mehedi2885/trydjango2 | acce9b3643769759c62fbc6475c6a3e68b162f1f | e1bfe3c14ef2df573d062b60a4a671d74103717b | refs/heads/master | 2022-04-22T20:14:59.813475 | 2020-04-28T20:11:20 | 2020-04-28T20:11:20 | 257,658,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,356 | py | """
Django settings for trydjango project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j7c_++!-d6=3lx(o^x5g0m(=nc*-ppx%&&l0r27fh#g99_br4_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products',
'pages',
'blog',
'courses',
'modelQuerySet',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trydjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trydjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django',
'USER': 'root',
'PASSWORD': 'Hassan2885',
'HOST': 'localhost',
'PORT': '3306',
'OPTIONS': {
'autocommit': True,
},
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
f9fedff9e4d783a6823b9fb25ad4aa897ee8700d | 556e88a954cf031460ea7fdf3791eb968ca4fbdd | /fluent_python/chapter_10/ch10_vector_v3.py | 6491b78aa2e5ea74affc5864f33b6aa2955b4065 | [] | no_license | feng-hui/python_books_examples | c696243fcb8305be495f44d1a88a02e7f906b7bd | e38542db7be927cdaa5d85317a58a13b3a13ae25 | refs/heads/master | 2022-03-07T00:37:29.311687 | 2019-09-28T15:15:20 | 2019-09-28T15:15:20 | 122,941,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @time : 2018-11-09 21:44
# @author : feng_hui
# @email : [email protected]
import reprlib
from array import array
from math import sqrt
import numbers
class Vector(object):
type_code = 'd'
shortcut_names = 'xyzt'
def __init__(self, components):
self._components = array(self.type_code, components)
def __iter__(self):
return iter(self._components)
def __len__(self):
return len(self._components)
def __getitem__(self, item):
cls = type(self)
if isinstance(item, slice):
return cls(self._components[item])
elif isinstance(item, numbers.Integral):
return self._components[item]
else:
msg = '{cls.__name__} indices must be integers'
raise TypeError(msg.format(cls=cls))
def __getattr__(self, item):
cls = type(self)
if len(item) == 1:
pos = cls.shortcut_names.find(item)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
        raise AttributeError(msg.format(cls, item))
def __setattr__(self, key, value):
cls = type(self)
if len(key) == 1:
if key in cls.shortcut_names:
error = 'readonly attribute {attr_name!r}'
elif key.islower():
error = "can't set attribute 'a' to 'z' in {attr_name!r}"
else:
error = ''
if error:
msg = error.format(cls_name=cls.__name__, attr_name=key)
raise AttributeError(msg)
super().__setattr__(key, value)
def __repr__(self):
"""
string
:return: if len(string) > 30, return string[:13] + '...' + string[14:]
"""
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (
bytes([ord(self.type_code)]) + bytes(self._components)
)
def __eq__(self, other):
return tuple(self) == tuple(other)
def __abs__(self):
return sqrt(sum(x * x for x in self._components))
def __bool__(self):
return bool(abs(self))
@classmethod
def from_bytes(cls, octets):
type_code = chr(octets[0])
memv = memoryview(octets[1:]).cast(type_code)
return cls(memv)
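
# Minimal demonstration of the protocols implemented above (values are
# illustrative):
if __name__ == "__main__":
    v = Vector(range(5))
    print(v.x, v.y)                          # shortcut names -> 0.0 1.0
    print(v[1:3])                            # slicing returns a new Vector
    print(Vector.from_bytes(bytes(v)) == v)  # True: round-trip via __bytes__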
| [
"[email protected]"
] | |
17ec92e3ac05477b8868ff0e22788a7ce32eae7b | bf683eb4a6080cf67669de90d1afdad53fccb738 | /Lib/site-packages/django/contrib/auth/views.py | 49fdda6e3b9e67e8b78fa213022329bba45d3e69 | [
"MIT"
] | permissive | mspgeek/Client_Portal | cd513308840aa4203554ebc1160f17f0dd4b17cf | 0267168bb90e8e9c85aecdd715972b9622b82384 | refs/heads/master | 2023-03-07T21:33:22.767108 | 2020-04-08T01:43:19 | 2020-04-08T01:43:19 | 253,946,635 | 6 | 0 | MIT | 2022-12-31T07:01:43 | 2020-04-08T00:43:07 | HTML | UTF-8 | Python | false | false | 22,536 | py | import warnings
from urllib.parse import urlparse, urlunparse
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
UserModel = get_user_model()
class SuccessURLAllowedHostsMixin:
success_url_allowed_hosts = set()
def get_success_url_allowed_hosts(self):
allowed_hosts = {self.request.get_host()}
allowed_hosts.update(self.success_url_allowed_hosts)
return allowed_hosts
class LoginView(SuccessURLAllowedHostsMixin, FormView):
"""
Display the login form and handle the login action.
"""
form_class = AuthenticationForm
authentication_form = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/login.html'
redirect_authenticated_user = False
extra_context = None
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
if self.redirect_authenticated_user and self.request.user.is_authenticated:
redirect_to = self.get_success_url()
if redirect_to == self.request.path:
raise ValueError(
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page."
)
return HttpResponseRedirect(redirect_to)
return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
url = self.get_redirect_url()
return url or resolve_url(settings.LOGIN_REDIRECT_URL)
def get_redirect_url(self):
"""Return the user-originating redirect URL if it's safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name, '')
)
url_is_safe = is_safe_url(
url=redirect_to,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
return redirect_to if url_is_safe else ''
def get_form_class(self):
return self.authentication_form or self.form_class
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
"""Security check complete. Log the user in."""
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
self.redirect_field_name: self.get_redirect_url(),
'site': current_site,
'site_name': current_site.name,
})
if self.extra_context is not None:
context.update(self.extra_context)
return context
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
extra_context=None, redirect_authenticated_user=False):
warnings.warn(
'The login() view is superseded by the class-based LoginView().',
RemovedInDjango21Warning, stacklevel=2
)
return LoginView.as_view(
template_name=template_name,
redirect_field_name=redirect_field_name,
form_class=authentication_form,
extra_context=extra_context,
redirect_authenticated_user=redirect_authenticated_user,
)(request)
class LogoutView(SuccessURLAllowedHostsMixin, TemplateView):
"""
Log out the user and display the 'You are logged out' message.
"""
next_page = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/logged_out.html'
extra_context = None
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
auth_logout(request)
next_page = self.get_next_page()
if next_page:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page)
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
"""Logout may be done via POST."""
return self.get(request, *args, **kwargs)
def get_next_page(self):
if self.next_page is not None:
next_page = resolve_url(self.next_page)
elif settings.LOGOUT_REDIRECT_URL:
next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
else:
next_page = self.next_page
if (self.redirect_field_name in self.request.POST or
self.redirect_field_name in self.request.GET):
next_page = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name)
)
url_is_safe = is_safe_url(
url=next_page,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
# Security check -- Ensure the user-originating redirection URL is
# safe.
if not url_is_safe:
next_page = self.request.path
return next_page
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out'),
})
if self.extra_context is not None:
context.update(self.extra_context)
return context
def logout(request, next_page=None,
template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME,
extra_context=None):
warnings.warn(
'The logout() view is superseded by the class-based LogoutView().',
RemovedInDjango21Warning, stacklevel=2
)
return LogoutView.as_view(
next_page=next_page,
template_name=template_name,
redirect_field_name=redirect_field_name,
extra_context=extra_context,
)(request)
_sentinel = object()
def logout_then_login(request, login_url=None, extra_context=_sentinel):
"""
Log out the user if they are logged in. Then redirect to the login page.
"""
if extra_context is not _sentinel:
warnings.warn(
"The unused `extra_context` parameter to `logout_then_login` "
"is deprecated.", RemovedInDjango21Warning
)
if not login_url:
login_url = settings.LOGIN_URL
login_url = resolve_url(login_url)
return LogoutView.as_view(next_page=login_url)(request)
def redirect_to_login(next, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Redirect the user to the login page, passing the given 'next' page.
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
warnings.warn("The password_reset() view is superseded by the "
"class-based PasswordResetView().",
RemovedInDjango21Warning, stacklevel=2)
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': _('Password reset'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def password_reset_done(request,
template_name='registration/password_reset_done.html',
extra_context=None):
warnings.warn("The password_reset_done() view is superseded by the "
"class-based PasswordResetDoneView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'title': _('Password reset sent'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
extra_context=None):
"""
Check the hash in a password reset link and present a form for entering a
new password.
"""
warnings.warn("The password_reset_confirm() view is superseded by the "
"class-based PasswordResetConfirmView().",
RemovedInDjango21Warning, stacklevel=2)
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring
uid = urlsafe_base64_decode(uidb64).decode()
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = _('Enter new password')
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(user)
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
extra_context=None):
warnings.warn("The password_reset_complete() view is superseded by the "
"class-based PasswordResetCompleteView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Password reset complete'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Class-based password reset views
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordContextMixin:
extra_context = None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = self.title
if self.extra_context is not None:
context.update(self.extra_context)
return context
class PasswordResetView(PasswordContextMixin, FormView):
email_template_name = 'registration/password_reset_email.html'
extra_email_context = None
form_class = PasswordResetForm
from_email = None
html_email_template_name = None
subject_template_name = 'registration/password_reset_subject.txt'
success_url = reverse_lazy('password_reset_done')
template_name = 'registration/password_reset_form.html'
title = _('Password reset')
token_generator = default_token_generator
@method_decorator(csrf_protect)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
opts = {
'use_https': self.request.is_secure(),
'token_generator': self.token_generator,
'from_email': self.from_email,
'email_template_name': self.email_template_name,
'subject_template_name': self.subject_template_name,
'request': self.request,
'html_email_template_name': self.html_email_template_name,
'extra_email_context': self.extra_email_context,
}
form.save(**opts)
return super().form_valid(form)
INTERNAL_RESET_URL_TOKEN = 'set-password'
INTERNAL_RESET_SESSION_TOKEN = '_password_reset_token'
class PasswordResetDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_done.html'
title = _('Password reset sent')
class PasswordResetConfirmView(PasswordContextMixin, FormView):
form_class = SetPasswordForm
post_reset_login = False
post_reset_login_backend = None
success_url = reverse_lazy('password_reset_complete')
template_name = 'registration/password_reset_confirm.html'
title = _('Enter new password')
token_generator = default_token_generator
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
assert 'uidb64' in kwargs and 'token' in kwargs
self.validlink = False
self.user = self.get_user(kwargs['uidb64'])
if self.user is not None:
token = kwargs['token']
if token == INTERNAL_RESET_URL_TOKEN:
session_token = self.request.session.get(INTERNAL_RESET_SESSION_TOKEN)
if self.token_generator.check_token(self.user, session_token):
# If the token is valid, display the password reset form.
self.validlink = True
return super().dispatch(*args, **kwargs)
else:
if self.token_generator.check_token(self.user, token):
# Store the token in the session and redirect to the
# password reset form at a URL without the token. That
# avoids the possibility of leaking the token in the
# HTTP Referer header.
self.request.session[INTERNAL_RESET_SESSION_TOKEN] = token
redirect_url = self.request.path.replace(token, INTERNAL_RESET_URL_TOKEN)
return HttpResponseRedirect(redirect_url)
# Display the "Password reset unsuccessful" page.
return self.render_to_response(self.get_context_data())
def get_user(self, uidb64):
try:
# urlsafe_base64_decode() decodes to bytestring
uid = urlsafe_base64_decode(uidb64).decode()
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist, ValidationError):
user = None
return user
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.user
return kwargs
def form_valid(self, form):
user = form.save()
del self.request.session[INTERNAL_RESET_SESSION_TOKEN]
if self.post_reset_login:
auth_login(self.request, user, self.post_reset_login_backend)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.validlink:
context['validlink'] = True
else:
context.update({
'form': None,
'title': _('Password reset unsuccessful'),
'validlink': False,
})
return context
class PasswordResetCompleteView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_complete.html'
title = _('Password reset complete')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['login_url'] = resolve_url(settings.LOGIN_URL)
return context
@sensitive_post_parameters()
@csrf_protect
@login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
extra_context=None):
warnings.warn("The password_change() view is superseded by the "
"class-based PasswordChangeView().",
RemovedInDjango21Warning, stacklevel=2)
if post_change_redirect is None:
post_change_redirect = reverse('password_change_done')
else:
post_change_redirect = resolve_url(post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@login_required
def password_change_done(request,
template_name='registration/password_change_done.html',
extra_context=None):
warnings.warn("The password_change_done() view is superseded by the "
"class-based PasswordChangeDoneView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'title': _('Password change successful'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
class PasswordChangeView(PasswordContextMixin, FormView):
form_class = PasswordChangeForm
success_url = reverse_lazy('password_change_done')
template_name = 'registration/password_change_form.html'
title = _('Password change')
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(self.request, form.user)
return super().form_valid(form)
class PasswordChangeDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_change_done.html'
title = _('Password change successful')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
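
# Illustrative URLconf wiring for the class-based views above; the route names
# and paths are examples, not part of this module:
#
#   from django.urls import path
#   from django.contrib.auth import views as auth_views
#
#   urlpatterns = [
#       path('login/', auth_views.LoginView.as_view(), name='login'),
#       path('logout/', auth_views.LogoutView.as_view(), name='logout'),
#       path('password_change/', auth_views.PasswordChangeView.as_view(),
#            name='password_change'),
#   ]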
| [
"[email protected]"
] | |
b0f80eb2b1f7e213f697b4799f1b0d39d340b773 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/videointelligence/v1p3beta1/videointelligence-v1p3beta1-py/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/__init__.py | a2b915bf9cbce5897c92f4aaabe34406b5a83a45 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import StreamingVideoIntelligenceServiceTransport
from .grpc import StreamingVideoIntelligenceServiceGrpcTransport
from .grpc_asyncio import StreamingVideoIntelligenceServiceGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[StreamingVideoIntelligenceServiceTransport]]
_transport_registry['grpc'] = StreamingVideoIntelligenceServiceGrpcTransport
_transport_registry['grpc_asyncio'] = StreamingVideoIntelligenceServiceGrpcAsyncIOTransport
__all__ = (
'StreamingVideoIntelligenceServiceTransport',
'StreamingVideoIntelligenceServiceGrpcTransport',
'StreamingVideoIntelligenceServiceGrpcAsyncIOTransport',
)
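
# Illustrative lookup (names as defined above); callers typically resolve a
# transport class from the registry by its string key:
#
#   transport_cls = _transport_registry['grpc']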
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
e3d5233ed380d8424d31d8ec58827f40bb02cd72 | 38d7109b78e0d009759586f49f506ac25eac6c5f | /orca/topology/manager.py | 87b235f773094269199a88aef782950de6a366a9 | [
"Apache-2.0"
] | permissive | MoonkiHong/orca | 19694dfe01a1bfbed9e4911b3c80e66ee78874bc | 28267c23eff2886f7b22a539c6e77faa2a2a6223 | refs/heads/master | 2021-01-14T15:21:15.785498 | 2020-02-23T12:57:25 | 2020-02-23T12:57:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | import cotyledon
from orca.graph import drivers as graph_drivers
from orca.graph.graph import Graph
from orca.topology import linker, probe
from orca.topology.alerts.elastalert import manager as es
from orca.topology.alerts.falco import manager as falco
from orca.topology.alerts.prometheus import manager as prom
from orca.topology.infra.istio import manager as istio
from orca.topology.infra.k8s import manager as k8s
from orca.topology.infra.kiali import manager as kiali
class Manager(cotyledon.ServiceManager):
def __init__(self):
super().__init__()
def initialize(self):
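        # Wiring: each probe pushes discovered topology into the shared graph;
        # the dispatcher listens for graph changes and lets registered linkers
        # connect entities found by different probes.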
graph = self._init_graph()
linker_dispatcher = linker.Dispatcher()
graph.add_listener(linker_dispatcher)
probe_managers = [k8s, istio, prom, falco, es, kiali]
for probe_manager in probe_managers:
for probe_inst in probe_manager.initialize_probes(graph):
self.add(probe.ProbeService, workers=1, args=(probe_inst,))
for linker_inst in probe_manager.initialize_linkers(graph):
linker_dispatcher.add_linker(linker_inst)
def _init_graph(self):
# TODO: read graph backend from config
graph_client = graph_drivers.DriverFactory.get('neo4j')
return Graph(graph_client)
| [
"[email protected]"
] | |
275e33e9e41f58b015be34ecee98851acc81ef13 | 540eca7619a4b91424f1d1f269e9ef2c31e2321b | /test/functional/abandonconflict.py | 6919a062c4c70bd4043a8f696a20358c3b68652f | [
"MIT"
] | permissive | Roshanthalal/RECAP-Core | 66406b8327ae233dd507b232e049a9acf10539b1 | ade84a0a11fe6d3769e7256c8f5117b9480d7e60 | refs/heads/master | 2022-11-19T10:05:32.551958 | 2020-07-27T05:03:06 | 2020-07-27T05:03:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,714 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already conflicted or abandoned.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class AbandonConflictTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.66668")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.66668btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.66668"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
        # But if it's received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.66668"))
balance = newbalance
        # Send child tx again so it's unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.66668") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.6666")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| [
"[email protected]"
] | |
99607a1473dacf9adfb44123dad7e292c20b1475 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_21109.py | 89613ead4826be15b1709bbf5083157639370ada | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # Is an infinite for loop bad practice?
# Endless loop
for p in players:
# Do game things...
| [
"[email protected]"
] | |
5e06516278e173e3b4afa26269f8a92ff15e0680 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /micronet_challenge/counting.py | fc60cb7e50cb23b0261ed8cfb1c9bd63868554a1 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 16,442 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines an API for counting parameters and operations.
## Defining the Operation Count API
- `input_size` is an int, since square image assumed.
- `strides` is a tuple, but assumed to have same stride in both dimensions.
- Supported `paddings` are `same' and `valid`.
- `use_bias` is boolean.
- `activation` is one of the following `relu`, `swish`, `sigmoid`, None
- kernel_shapes for `Conv2D` dimensions must be in the following order:
`k_size, k_size, c_in, c_out`
- kernel_shapes for `FullyConnected` dimensions must be in the following order:
`c_in, c_out`
- kernel_shapes for `DepthWiseConv2D` dimensions must be like the following:
`k_size, k_size, c_in==c_out, 1`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
"""Operation definition for 2D convolution.
Attributes:
input_size: int, Dimensions of the input image (square assumed).
kernel_shape: list, of length 4. Shape of the convolutional kernel.
strides: list, of length 2. Stride with which the kernel is applied.
padding: str, padding added to the input image.
use_bias: bool, if true a bias term is added to the output.
activation: str, type of activation applied to the output.
""" # pylint: disable=pointless-string-statement
Conv2D = collections.namedtuple(
'Conv2D', ['input_size', 'kernel_shape', 'strides', 'padding', 'use_bias',
'activation'])
"""Operation definition for 2D depthwise convolution.
Only difference compared to Conv2D is the kernel_shape[3] = 1.
"""
DepthWiseConv2D = collections.namedtuple(
'DepthWiseConv2D', ['input_size', 'kernel_shape', 'strides', 'padding',
'use_bias', 'activation'])
"""Operation definition for Global Average Pooling.
Attributes:
input_size: int, Dimensions of the input image (square assumed).
n_channels: int, Number of output dimensions.
"""
GlobalAvg = collections.namedtuple('GlobalAvg', ['input_size', 'n_channels'])
"""Operation definitions for elementwise multiplication and addition.
Attributes:
input_size: int, Dimensions of the input image (square assumed).
n_channels: int, Number of output dimensions.
"""
Scale = collections.namedtuple('Scale', ['input_size', 'n_channels'])
Add = collections.namedtuple('Add', ['input_size', 'n_channels'])
"""Operation definitions for elementwise multiplication and addition.
Attributes:
kernel_shape: list, of length 2. Shape of the weight matrix.
use_bias: bool, if true a bias term is added to the output.
activation: str, type of activation applied to the output.
"""
FullyConnected = collections.namedtuple(
'FullyConnected', ['kernel_shape', 'use_bias', 'activation'])
def get_flops_per_activation(activation):
"""Returns the number of multiplication ands additions of an activation.
Args:
activation: str, type of activation applied to the output.
Returns:
n_muls, n_adds
"""
if activation == 'relu':
# For the purposes of the "freebie" quantization scoring, ReLUs can be
# assumed to be performed on 16-bit inputs. Thus, we track them as
# multiplications in our accounting, which can also be assumed to be
# performed on reduced precision inputs.
return 1, 0
elif activation == 'swish': # Swish: x / (1 + exp(-bx))
return 3, 1
elif activation == 'sigmoid': # Sigmoid: exp(x) / (1 + exp(x))
return 2, 1
else:
raise ValueError('activation: %s is not valid' % activation)
def get_sparse_size(tensor_shape, param_bits, sparsity):
"""Given a tensor shape returns #bits required to store the tensor sparse.
If sparsity is greater than 0, we do have to store a bit mask to represent
sparsity.
Args:
tensor_shape: list<int>, shape of the tensor
param_bits: int, number of bits the elements of the tensor represented in.
sparsity: float, sparsity level. 0 means dense.
Returns:
int, number of bits required to represented the tensor in sparse format.
"""
n_elements = np.prod(tensor_shape)
c_size = n_elements * param_bits * (1 - sparsity)
if sparsity > 0:
c_size += n_elements # 1 bit binary mask.
return c_size
def get_conv_output_size(image_size, filter_size, padding, stride):
"""Calculates the output size of convolution.
The input, filter and the strides are assumed to be square.
Arguments:
image_size: int, Dimensions of the input image (square assumed).
filter_size: int, Dimensions of the kernel (square assumed).
padding: str, padding added to the input image. 'same' or 'valid'
stride: int, stride with which the kernel is applied (square assumed).
Returns:
int, output size.
"""
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
else:
raise NotImplementedError('Padding: %s should be `same` or `valid`.'
% padding)
out_size = np.ceil((image_size - filter_size + 1. + 2 * pad) / stride)
return int(out_size)
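# Quick check (hedged, illustrative numbers): a 32x32 input with a 3x3 kernel,
# 'same' padding (pad=1) and stride 2 gives ceil((32 - 3 + 1 + 2*1) / 2) = 16.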
def count_ops(op, sparsity, param_bits):
"""Given a operation class returns the flop and parameter statistics.
Args:
op: namedtuple, operation definition.
    sparsity: float, sparsity of parameterized operations. Sparsity only
      affects Conv and FC layers, since activations are dense.
param_bits: int, number of bits required to represent a parameter.
Returns:
param_count: number of bits required to store parameters
n_mults: number of multiplications made per input sample.
n_adds: number of multiplications made per input sample.
"""
flop_mults = flop_adds = param_count = 0
if isinstance(op, Conv2D):
# Square kernel expected.
assert op.kernel_shape[0] == op.kernel_shape[1]
k_size, _, c_in, c_out = op.kernel_shape
# Size of the possibly sparse convolutional tensor.
param_count += get_sparse_size(
[k_size, k_size, c_in, c_out], param_bits, sparsity)
# Square stride expected.
assert op.strides[0] == op.strides[1]
stride = op.strides[0]
# Each application of the kernel can be thought as a dot product between
# the flattened kernel and patches of the image.
vector_length = (k_size * k_size * c_in) * (1 - sparsity)
# Number of elements in the output is OUT_SIZE * OUT_SIZE * OUT_CHANNEL
n_output_elements = get_conv_output_size(op.input_size, k_size, op.padding,
stride) ** 2 * c_out
# Each output is the product of a one dot product. Dot product of two
# vectors of size n needs n multiplications and n - 1 additions.
flop_mults += vector_length * n_output_elements
flop_adds += (vector_length - 1) * n_output_elements
if op.use_bias:
# For each output channel we need a bias term.
param_count += c_out * param_bits
# If we have bias we need one more addition per dot product.
flop_adds += n_output_elements
if op.activation:
      # We would apply the activation to every single output element.
n_muls, n_adds = get_flops_per_activation(op.activation)
flop_mults += n_muls * n_output_elements
flop_adds += n_adds * n_output_elements
elif isinstance(op, DepthWiseConv2D):
# Square kernel expected.
assert op.kernel_shape[0] == op.kernel_shape[1]
# Last dimension of the kernel should be 1.
assert op.kernel_shape[3] == 1
k_size, _, channels, _ = op.kernel_shape
# Size of the possibly sparse convolutional tensor.
param_count += get_sparse_size(
[k_size, k_size, channels], param_bits, sparsity)
# Square stride expected.
assert op.strides[0] == op.strides[1]
stride = op.strides[0]
# Each application of the kernel can be thought as a dot product between
# the flattened kernel and patches of the image.
vector_length = (k_size * k_size) * (1 - sparsity)
# Number of elements in the output tensor.
n_output_elements = get_conv_output_size(op.input_size, k_size, op.padding,
stride) ** 2 * channels
# Each output is the product of a one dot product. Dot product of two
# vectors of size n needs n multiplications and n - 1 additions.
flop_mults += vector_length * n_output_elements
flop_adds += (vector_length - 1) * n_output_elements
if op.use_bias:
# For each output channel we need a bias term.
param_count += channels * param_bits
# If we have bias we need one more addition per dot product.
flop_adds += n_output_elements
if op.activation:
      # We would apply the activation to every single output element.
n_muls, n_adds = get_flops_per_activation(op.activation)
flop_mults += n_muls * n_output_elements
flop_adds += n_adds * n_output_elements
elif isinstance(op, GlobalAvg):
# For each output channel we will make a division.
flop_mults += op.n_channels
# We have to add values over spatial dimensions.
flop_adds += (op.input_size * op.input_size - 1) * op.n_channels
elif isinstance(op, Scale):
# Number of elements many multiplications.
flop_mults += op.input_size * op.input_size * op.n_channels
elif isinstance(op, Add):
# Number of elements many additions.
flop_adds += op.input_size * op.input_size * op.n_channels
elif isinstance(op, FullyConnected):
c_in, c_out = op.kernel_shape
# Size of the possibly sparse weight matrix.
param_count += get_sparse_size(
[c_in, c_out], param_bits, sparsity)
# number of non-zero elements for the sparse dot product.
n_elements = c_in * (1 - sparsity)
flop_mults += n_elements * c_out
# We have one less addition than the number of multiplications per output
# channel.
flop_adds += (n_elements - 1) * c_out
if op.use_bias:
param_count += c_out * param_bits
flop_adds += c_out
if op.activation:
n_muls, n_adds = get_flops_per_activation(op.activation)
flop_mults += n_muls * c_out
flop_adds += n_adds * c_out
else:
raise ValueError('Encountered unknown operation %s.' % str(op))
return param_count, flop_mults, flop_adds
# Info
def get_info(op):
"""Given an op extracts some common information."""
input_size, kernel_size, in_channels, out_channels = [-1] * 4
if isinstance(op, (DepthWiseConv2D, Conv2D)):
# square kernel assumed.
kernel_size, _, in_channels, out_channels = op.kernel_shape
input_size = op.input_size
elif isinstance(op, GlobalAvg):
in_channels = op.n_channels
out_channels = 1
input_size = op.input_size
elif isinstance(op, (Add, Scale)):
in_channels = op.n_channels
out_channels = op.n_channels
input_size = op.input_size
elif isinstance(op, FullyConnected):
in_channels, out_channels = op.kernel_shape
input_size = 1
else:
raise ValueError('Encountered unknown operation %s.' % str(op))
return input_size, kernel_size, in_channels, out_channels
class MicroNetCounter(object):
"""Counts operations using given information.
"""
_header_str = '{:25} {:>10} {:>13} {:>13} {:>13} {:>15} {:>10} {:>10} {:>10}'
_line_str = ('{:25s} {:10d} {:13d} {:13d} {:13d} {:15.3f} {:10.3f}'
' {:10.3f} {:10.3f}')
def __init__(self, all_ops, add_bits_base=32, mul_bits_base=32):
self.all_ops = all_ops
# Full precision add is counted one.
self.add_bits_base = add_bits_base
# Full precision multiply is counted one.
self.mul_bits_base = mul_bits_base
def _aggregate_list(self, counts):
return np.array(counts).sum(axis=0)
def process_counts(self, total_params, total_mults, total_adds,
mul_bits, add_bits):
# converting to Mbytes.
total_params = int(total_params) / 8. / 1e6
total_mults = total_mults * mul_bits / self.mul_bits_base / 1e6
total_adds = total_adds * add_bits / self.add_bits_base / 1e6
return total_params, total_mults, total_adds
def _print_header(self):
output_string = self._header_str.format(
'op_name', 'inp_size', 'kernel_size', 'in channels', 'out channels',
'params(MBytes)', 'mults(M)', 'adds(M)', 'MFLOPS')
print(output_string)
print(''.join(['=']*125))
def _print_line(self, name, input_size, kernel_size, in_channels,
out_channels, param_count, flop_mults, flop_adds, mul_bits,
add_bits, base_str=None):
"""Prints a single line of operation counts."""
op_pc, op_mu, op_ad = self.process_counts(param_count, flop_mults,
flop_adds, mul_bits, add_bits)
if base_str is None:
base_str = self._line_str
output_string = base_str.format(
name, input_size, kernel_size, in_channels, out_channels, op_pc,
op_mu, op_ad, op_mu + op_ad)
print(output_string)
def print_summary(self, sparsity, param_bits, add_bits, mul_bits,
summarize_blocks=True):
"""Prints all operations with given options.
Args:
sparsity: float, between 0,1 defines how sparse each parametric layer is.
param_bits: int, bits in which parameters are stored.
add_bits: float, number of bits used for accumulator.
mul_bits: float, number of bits inputs represented for multiplication.
summarize_blocks: bool, if True counts within a block are aggregated and
reported in a single line.
"""
self._print_header()
# Let's count starting from zero.
total_params, total_mults, total_adds = [0] * 3
for op_name, op_template in self.all_ops:
if op_name.startswith('block'):
if not summarize_blocks:
# If debug print the ops inside a block.
for block_op_name, block_op_template in op_template:
param_count, flop_mults, flop_adds = count_ops(block_op_template,
sparsity, param_bits)
temp_res = get_info(block_op_template)
input_size, kernel_size, in_channels, out_channels = temp_res
self._print_line('%s_%s' % (op_name, block_op_name), input_size,
kernel_size, in_channels, out_channels,
param_count, flop_mults, flop_adds, mul_bits,
add_bits)
# Count and sum all ops within a block.
param_count, flop_mults, flop_adds = self._aggregate_list(
[count_ops(template, sparsity, param_bits)
for _, template in op_template])
# Let's extract the input_size and in_channels from the first operation.
input_size, _, in_channels, _ = get_info(op_template[0][1])
# Since we don't know what is inside a block we don't know the following
# fields.
kernel_size = out_channels = -1
else:
# If it is a single operation just count.
param_count, flop_mults, flop_adds = count_ops(op_template, sparsity,
param_bits)
temp_res = get_info(op_template)
input_size, kernel_size, in_channels, out_channels = temp_res
# At this point param_count, flop_mults, flop_adds should be read.
total_params += param_count
total_mults += flop_mults
total_adds += flop_adds
# Print the operation.
self._print_line(op_name, input_size, kernel_size, in_channels,
out_channels, param_count, flop_mults, flop_adds,
mul_bits, add_bits)
# Print Total values.
# New string since we are passing empty strings instead of integers.
out_str = ('{:25s} {:10s} {:13s} {:13s} {:13s} {:15.3f} {:10.3f} {:10.3f} '
'{:10.3f}')
self._print_line(
'total', '', '', '', '', total_params, total_mults, total_adds,
mul_bits, add_bits, base_str=out_str)
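# --- Hedged usage sketch (not part of the original module) ---
# Minimal demonstration of the counting API above on a tiny two-op "network".
# The op names and parameter values here are illustrative assumptions only.
if __name__ == '__main__':
  demo_ops = [
      ('conv1', Conv2D(input_size=32, kernel_shape=[3, 3, 3, 16],
                       strides=[1, 1], padding='same', use_bias=True,
                       activation='relu')),
      ('fc', FullyConnected(kernel_shape=[16, 10], use_bias=True,
                            activation=None)),
  ]
  counter = MicroNetCounter(demo_ops)
  # Dense weights, full-precision (32-bit) parameters and arithmetic.
  counter.print_summary(sparsity=0.0, param_bits=32, add_bits=32, mul_bits=32)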
| [
"[email protected]"
] | |
7f2d1e41aec4d8c1e88832f5a46b4b9cbe6b8fa1 | fe683ebe3cbf794dd41121d67bff86278f2721cf | /src/nanocurrency/__init__.py | 4ed3cec39a3578103a5ea9c514610109bacfda29 | [
"CC0-1.0"
] | permissive | marcosmmb/pynanocurrency | 3966ab61dc10c1b892c0fdd3d62c41b7df2a038a | dd1a4f093068447f9f2421b708843df4e6eb17c9 | refs/heads/master | 2020-04-26T06:44:21.634468 | 2019-03-02T02:45:49 | 2019-03-02T02:45:49 | 173,374,825 | 1 | 0 | null | 2019-03-01T22:11:47 | 2019-03-01T22:11:47 | null | UTF-8 | Python | false | false | 450 | py | """
nanocurrency
~~~~~~~~~~~~
pynanocurrency is a Python library for working with the NANO cryptocurrency:
block creation and manipulation, account generation, and proof-of-work
validation and solving.
"""
from .accounts import *
from .blocks import *
from .exceptions import *
from .units import *
from .work import *
from .util import *
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| [
"[email protected]"
] | |
e8a25edf6cf55a7d26838fd9f347dbeacacfc73f | 188fa261446cee9fc1b56029e884c8e74364a7f4 | /huseinhouse.com/MBTI-Study/soaning/function.py | c72af9fc818dc9cea5ec2ca6b50fa4a8414cc176 | [
"MIT"
] | permissive | huseinzol05/Hackathon-Huseinhouse | 026cc1346afb127aa2675be94a818ebf35f72bb5 | 6796af2fe02f10d7860ac8db27bd24fa27b8bb01 | refs/heads/master | 2021-06-29T23:04:35.027212 | 2020-09-07T04:01:49 | 2020-09-07T04:01:49 | 147,768,669 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,469 | py | import textract
import re
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import LabelEncoder
import sklearn.datasets
import nltk
nltk.data.path.append('/home/husein/nltk_data/')
from textblob import TextBlob
import random
import collections
from collections import OrderedDict
from fuzzywuzzy import fuzz
import numpy as np
def clearstring(string):
string = re.sub('[^A-Za-z ]+', '', string)
string = string.split(' ')
string = filter(None, string)
string = [y.strip() for y in string]
string = ' '.join(string)
return string.lower()
df = pd.read_csv('processed_mbti.csv')
label = df.type.unique()
labelset = LabelEncoder().fit_transform(df.type)
trainset = df.posts.values
for i in range(trainset.shape[0]):
trainset[i] = ' '.join(trainset[i].split('|||'))
def separate_dataset(trainset):
datastring = []
datatarget = []
for i in range(len(trainset.data)):
data_ = trainset.data[i].split('\n')
        data_ = list(filter(None, data_))  # list() so len()/indexing work under Python 3
for n in range(len(data_)):
data_[n] = clearstring(data_[n])
datastring += data_
for n in range(len(data_)):
datatarget.append(trainset.target[i])
return datastring, datatarget
job = sklearn.datasets.load_files(container_path = 'jobdescription', decode_error = 'replace')
job.data, job.target = separate_dataset(job)
c = list(zip(job.data, job.target))
random.shuffle(c)
job.data, job.target = zip(*c)
dev_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1, 2))), ('clf', SGDClassifier(loss = 'modified_huber', penalty = 'l2', alpha = 1e-4, n_iter = 100, random_state = 42))])
dev_clf.fit(job.data, job.target)
clf = Pipeline([('vect', CountVectorizer()), ('clf', SGDClassifier(loss = 'modified_huber', penalty = 'l2', alpha = 1e-4, n_iter = 100, random_state = 42))])
clf.fit(trainset, labelset)
def clearstring_pdf(string):
string = re.sub(r'[^\x00-\x7F]', '', string)
string = string.split(' ')
string = filter(None, string)
string = [y.strip() for y in string]
string = ' '.join(string)
return string
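# get_detail(text): given raw resume/profile text, returns a dict with
#   'developer'             - per-job-category probabilities from dev_clf,
#   'personality(_percent)' - MBTI letter frequencies among top predictions,
#   'type'                  - closest full MBTI type by fuzzy string match,
#   'nouns'/'nouns_count'   - unique NN/NNP tokens with their counts.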
def get_detail(text):
text = filter(None, [clearstring_pdf(t) for t in text.split('\n')])
blobs = [TextBlob(i).tags for i in text]
nouns = []
for blob in blobs:
nouns += [b[0] for b in blob if b[1] == 'NNP' or b[1] == 'NN']
nouns = [n.lower() for n in nouns][15:]
prob = dev_clf.predict_proba(text)
prob = np.mean(prob, axis = 0)
dict_prob = {}
for i in range(prob.shape[0]):
dict_prob[job.target_names[i]] = float(prob[i])
personality = clf.predict_proba([' '.join(text)])[0]
unique = np.unique(personality)
loc = np.where(personality == unique[-1])[0]
personalities = []
for i in loc:
personalities += list(label[i])
personalities_unique, personalities_count = np.unique(personalities, return_counts = True)
personalities_count = (personalities_count * 1.0) / np.sum(personalities_count)
counts = collections.Counter(personalities)
new_list = sorted(personalities, key = lambda x: -counts[x])
new_list = ''.join(list(OrderedDict.fromkeys(new_list))[:4])
new_type = label[np.argmax([fuzz.ratio(new_list, i) for i in label])]
nouns_unique, nouns_count = np.unique(nouns, return_counts = True)
return {'developer': dict_prob, 'personality_percent': personalities_count.tolist(), 'personality': personalities_unique.tolist(), 'type': new_type,
'nouns': nouns_unique.tolist(), 'nouns_count': nouns_count.tolist()}
| [
"[email protected]"
] | |
b0fb2160ae3308b14f82c19fd930021f3db68660 | b8d286c69d89ea42f532c2784ec2aa1633c57d8f | /tests/test_devices/test_snmp_handler_interface.py | 18fd9b196f28aff00a5d291019c027a4e5ac06f7 | [
"Apache-2.0"
] | permissive | dirkakrid/cloudshell-networking-devices | 460f85268998f6403fb1a0567d8303e0e92ace27 | 6e62b8ab4c1d8dbe8a68d6ff2d34094b3b90a548 | refs/heads/master | 2021-01-21T19:40:12.388282 | 2017-05-10T13:20:01 | 2017-05-10T13:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | import unittest
from cloudshell.devices.snmp_handler_interface import SnmpHandlerInterface
class TestSnmpHandlerInterface(unittest.TestCase):
def setUp(self):
class TestedClass(SnmpHandlerInterface):
pass
self.tested_class = TestedClass
    def test_get_snmp_service(self):
        """Check that the class can't be instantiated without an implementation of the "get_snmp_service" method"""
with self.assertRaisesRegexp(TypeError, "Can't instantiate abstract class TestedClass with "
"abstract methods get_snmp_service"):
self.tested_class()
| [
"[email protected]"
] | |
e47193ed11bd9d0fe2046a3291cee676d776ccd3 | ac8ffabf4d7339c5466e53dafc3f7e87697f08eb | /python_solutions/1425.constrained-subsequence-sum.py | 4038454a87a6aa43af1ce0804ead5dafa93468e9 | [] | no_license | h4hany/leetcode | 4cbf23ea7c5b5ecfd26aef61bfc109741f881591 | 9e4f6f1a2830bd9aab1bba374c98f0464825d435 | refs/heads/master | 2023-01-09T17:39:06.212421 | 2020-11-12T07:26:39 | 2020-11-12T07:26:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce, lru_cache
from typing import List
import itertools
import math
import heapq
import string
true = True
false = False
MIN, MAX = -0x3f3f3f3f, 0x3f3f3f3f
#
# @lc app=leetcode id=1425 lang=python3
#
# [1425] Constrained Subsequence Sum
#
# https://leetcode.com/problems/constrained-subsequence-sum/description/
#
# algorithms
# Hard (43.60%)
# Total Accepted: 6.9K
# Total Submissions: 15.8K
# Testcase Example: '[10,2,-10,5,20]\n2'
#
# Given an integer array nums and an integer k, return the maximum sum of a
# non-empty subsequence of that array such that for every two consecutive
# integers in the subsequence, nums[i] and nums[j], where i < j, the condition
# j - i <= k is satisfied.
#
# A subsequence of an array is obtained by deleting some number of elements
# (can be zero) from the array, leaving the remaining elements in their
# original order.
#
#
# Example 1:
#
#
# Input: nums = [10,2,-10,5,20], k = 2
# Output: 37
# Explanation: The subsequence is [10, 2, 5, 20].
#
#
# Example 2:
#
#
# Input: nums = [-1,-2,-3], k = 1
# Output: -1
# Explanation: The subsequence must be non-empty, so we choose the largest
# number.
#
#
# Example 3:
#
#
# Input: nums = [10,-2,-10,-5,20], k = 2
# Output: 23
# Explanation: The subsequence is [10, -2, -5, 20].
#
#
#
# Constraints:
#
#
# 1 <= k <= nums.length <= 10^5
# -10^4 <= nums[i] <= 10^4
#
#
#
class Solution:
def constrainedSubsetSum(self, nums: List[int], k: int) -> int:
        '''Sliding-window DP: dp[i] = nums[i] + max(0, max(dp[i-k..i-1])).
        d is a decreasing deque caching the positive dp maxima of the
        previous k positions, so the window maximum is always d[0].
        '''
d = deque()
ans = float('-inf')
for i in range(len(nums)):
nums[i] += d[0] if d else 0
ans = max(ans, nums[i])
# both >= and > are fine
while d and nums[i] >= d[-1]:
d.pop()
if nums[i] > 0:
d.append(nums[i])
while d and i >= k and d[0] == nums[i - k]:
d.popleft()
return ans
sol = Solution()
# nums = [10,2,-10,5,20], k = 2
# nums = [-1,-2,-3], k = 1
nums = [10, -2, -10, -5, 20]
k = 2
print(sol.constrainedSubsetSum(nums, k))
| [
"[email protected]"
] | |
af815ff89cb5d9b1273a7cc8d98677c1eba8e801 | 51a2fb45db6a074c7bd5af32c8ee8471251436f4 | /Project/企业微信/PageObject实战/PO/Login_PO.py | 9d221d18c4bee9c5f5fe323ce5232fbca8a85d1b | [] | no_license | JiangHuYiXiao/Web-Autotest-Python | c5e2cf61a5a62d132df048d3218dfb973be8784e | 65b30360337b56b6ca4eba21f729c922f1665489 | refs/heads/master | 2021-08-26T07:46:42.957744 | 2021-08-12T02:24:11 | 2021-08-12T02:24:11 | 253,945,694 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2021/2/26 16:01
# @Software : Web-Autotest-Python
# @Python_version : 3.7
from selenium.webdriver.remote.webdriver import WebDriver
from Project.企业微信.PageObject实战1.PO.Register_PO import Register
class Login():
    def __init__(self, driver: WebDriver):  # reuse the previous PO's driver; typing it as WebDriver enables the find_element_* helpers
self._driver = driver
def scan(self):
pass
def click_register(self):
        # Click the "Register Now" link
self._driver.find_element_by_css_selector('.login_registerBar_link').click()
return Register(self._driver)
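# Usage sketch (hedged): Login(driver).click_register() clicks the register
# link and hands back a Register page object for the next step in the flow.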
| [
"[email protected]"
] | |
896e40d4255f32860379afbe8a7112b45b7b29b6 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /tbz5ji3ocwzAeLQNa_11.py | fce155cea3aba702b57ec473a17f33d65c5f76ee | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | """
Given a two-dimensional list of `maze` and a list of `directions`. Your task
is to follow the given directions.
* If you can reach the endpoint before all your moves have gone, return `"Finish"`.
* If you hit any walls or go outside the maze border, return `"Dead"`.
* If you find yourself still in the maze after using all the moves, return `"Lost"`.
The maze list will look like this:
maze = [
[1, 1, 1, 1, 1, 1, 1, 1, 0, 1],
[1, 3, 1, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 1, 2, 1]
]
# 0 = Safe place to walk
# 1 = Wall
# 2 = Start Point
# 3 = Finish Point
# N = North, E = East, W = West and S = South
See the below examples for a better understanding:
### Examples
exit_maze(maze, ["N", "E", "E"]) ➞ "Dead"
# Hitting the wall should return "Dead".
exit_maze(maze, ["N", "N", "N", "E"]) ➞ "Lost"
# Couldn't reach the finish point.
exit_maze(maze, ["N", "W", "W", "W", "N", "N", "N", "N", "W", "W", "S", "S", "S", "S", "W", "W", "N", "N", "N", "N", "N", "N", "N"]) ➞ "Finish"
### Notes
N/A
"""
def Starting_Point(maze):
for i in range(len(maze)):
for j in range(len(maze)):
if maze[i][j] == 2:
return j,i
def exit_maze(maze, directions):
col , line = Starting_Point(maze)
for moves in directions:
if moves == "N": line-=1
elif moves == "S": line+=1
elif moves == "W": col-=1
else: col+=1
if line > len(maze)-1 or col > len(maze)-1:return 'Dead'
if maze[line][col] == 1:return 'Dead'
elif maze[line][col] == 3:return 'Finish'
return 'Lost'
| [
"[email protected]"
] | |
b66bd06abd3bf875afad489cae81330f335e27e8 | cc78de009a8e7805f9f6a852774e0384b11bfdcb | /testcase/api/login/postman_login_post.py | 18b31c0cc29699bb94201354eab342239c45f319 | [] | no_license | williamzxl/app_test_many_devices | c1806e54c17a84f18a04c3808604633c2deba052 | dd5434018fadd11d5462903cafaafbb5b387c24a | refs/heads/master | 2020-03-29T21:20:51.239295 | 2019-03-05T03:13:56 | 2019-03-05T03:13:56 | 150,361,766 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import requests
import json
url = "https://proxy.langlib.com/accounts/loginByAccount"
headers = {
'platform': "Android",
'appkey': "CEE_AA8F55B916AB",
'appversion': "10000005",
'appsecret': "3DB5159C-EB1E-47FE-8584-47115EF5E443",
'app': "cee",
'content-length': "55",
'host': "proxy.langlib.com",
'accept-encoding': "gzip",
'Connection': 'Keep-Alive',
'user-agent': "okhttp/3.11.0",
'content-type': "application/json",
'cache-control': "no-cache",
}
body = {"UserCredential": "[email protected]", "Password": "111111"}
response = requests.request("POST", url, headers=headers, json=body)
content = (json.loads(response.text))
print(content.get("AccessToken"))
print(type(content)) | [
"[email protected]"
] | |
d4656c1a3c9c77ace9534ffe71b6a42d0cfb0ff6 | 97959e8f049e1877931e85b9965fc48e1d7b7616 | /27.py | 452de52776bed556ec31c7166f8038ef60774ca9 | [] | no_license | mahabaleshwarabairi/mahabaleshwarabairi | 403911a7e9a0ec93730845eea6e27ee55363113f | 27935ac9165f049c48153455768709e6d8c8a374 | refs/heads/master | 2023-04-26T08:26:17.574774 | 2021-06-05T08:36:35 | 2021-06-05T08:36:35 | 351,385,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 11:13:22 2020
@author: Mahabaleshwara.Bairi
"""
states={
'TN' : {'capital':'Chennai','language':'Tamil'},
'KL': {'capital':'Trivandrum','language':'Malayalam'},
'KK':{'capital':'BNG','language':'KAN'}
}
labels ={'capital':'capital city',
'language':'spoken language'}
state=input("Please enter state:")
fetch=input("capital (C) or L..?")
if fetch=='C': key='capital'
if fetch=='L':key='language'
if state in states:
print("%s 's %s is %s" %(state,labels[key],states[state][key]))
| [
"[email protected]"
] | |
d25ad941d53fda126c646c173f4cb243d4f51f3b | a123ea0f8d9be8acb48c772646586d6f7e260b99 | /workshop/d14/d14/urls.py | 2e876e1f7e48501258f60bcfff920b7bb5aae84b | [] | no_license | pjh6315/TIL-C9 | 8007057cdaa3b9e9c38c3cdcf2b5addc88eefac8 | 681f9c95665302adb64d02022ede7b94cce5e47e | refs/heads/master | 2020-04-17T17:17:55.652973 | 2019-02-12T08:46:07 | 2019-02-12T08:46:07 | 166,777,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | """d14 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from pages import views
urlpatterns = [
path('detail/<str:name>',views.detail),
path('info/',views.info),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
e9a1fae3749f53ba0f93c11b8432510a4e7f8af0 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/pandas/tests/util/test_assert_interval_array_equal.py | 8a537308063a13fd91d44a09de45484ac058bbb0 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:82edae2255ec8fcb3927eb50fad5b0eda9ea8dbaaa2cb6dc12188b8a9fac142b
size 2364
| [
"[email protected]"
] | |
d1b92393bf9fbe4433948c91091bb196969f8bc8 | 8dde6f201657946ad0cfeacab41831f681e6bc6f | /1167. Minimum Cost to Connect Sticks.py | 272df4236aaf86c111809a73d668d5d9118eacac | [] | no_license | peraktong/LEETCODE_Jason | c5d4a524ba69b1b089f18ce4a53dc8f50ccbb88c | 06961cc468211b9692cd7a889ee38d1cd4e1d11e | refs/heads/master | 2022-04-12T11:34:38.738731 | 2020-04-07T21:17:04 | 2020-04-07T21:17:04 | 219,398,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import heapq
class Solution(object):
def connectSticks(self, sticks):
"""
:type sticks: List[int]
:rtype: int
"""
# always start with lowest since they are added the most
ans = 0
heapq.heapify(sticks)
while len(sticks) > 1:
x, y = heapq.heappop(sticks), heapq.heappop(sticks)
ans += x + y
heapq.heappush(sticks, x + y)
return ans
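# Quick check (hedged): Solution().connectSticks([2, 4, 3]) returns 14:
# combine 2+3=5 (cost 5), then 5+4=9 (cost 9); total 5+9=14.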
| [
"[email protected]"
] | |
02abde4a40072f9b6ee47ca15157b48df39d4a60 | 25dda94672497e3287a7403e283fb279ad171b79 | /practice/정렬/선택 정렬.py | eba9f36bb4a0839f774abd388f00bb2e48c35c01 | [] | no_license | woorud/Algorithm | c94b844e8c96a446c5fdee5c0abb159bfee384d7 | f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541 | refs/heads/master | 2023-02-23T13:53:28.645036 | 2021-01-29T12:24:23 | 2021-01-29T12:24:23 | 230,908,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | array = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
for i in range(len(array)):
min_index = i
for j in range(i+1, len(array)):
if array[min_index] > array[j]:
min_index = j
array[i], array[min_index] = array[min_index], array[i]
print(array) | [
"[email protected]"
] | |
854490c2c1af86b3ae92e8bbb3ef416c4db34c91 | 714ba6b4a821fd33eb3a581701175d1ba986fcaf | /BUILD | b8cb64af035a500d727bd4d6032573912b91e4f8 | [
"Apache-2.0"
] | permissive | OldJohn86/power_dist | 9c648943f268c23aaa664ef1ea2730c6e9b2a446 | f781401e9e8b3e03826feb3902058f022cc1a164 | refs/heads/main | 2023-04-07T01:24:22.323244 | 2021-04-10T15:21:23 | 2021-04-10T15:21:23 | 329,294,259 | 0 | 0 | Apache-2.0 | 2021-01-13T12:01:11 | 2021-01-13T12:01:11 | null | UTF-8 | Python | false | false | 691 | # -*- python -*-
# Copyright 2020 Josh Pieper, [email protected].
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
filegroup(
name = "target",
srcs = [
"//fw:power_dist",
],
)
| [
"[email protected]"
] | ||
3f36a70b59cf9d6ad4fd514c74134960e5deec10 | b6bcfd935f7876fc65416e7340fda1c9b0516fd7 | /pyscf/fci/selected_ci.py | a578f20aea6156c46e77cfd1a9106d8ae2094558 | [
"Apache-2.0"
] | permissive | lzypotato/pyscf | 62f849b9a3ec8480c3da63a5822ea780608796b2 | 94c21e2e9745800c7efc7256de0d628fc60afc36 | refs/heads/master | 2020-09-06T22:45:04.191935 | 2019-06-18T06:04:48 | 2019-06-18T06:04:48 | 220,578,540 | 1 | 0 | Apache-2.0 | 2019-11-09T02:13:16 | 2019-11-09T02:13:15 | null | UTF-8 | Python | false | false | 41,236 | py | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Selected CI
Simple usage::
>>> from pyscf import gto, scf, ao2mo, fci
>>> mol = gto.M(atom='C 0 0 0; C 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)
>>> h2 = ao2mo.kernel(mol, mf.mo_coeff)
>>> e = fci.selected_ci.kernel(h1, h2, mf.mo_coeff.shape[1], mol.nelectron)[0]
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
from pyscf.fci import rdm
from pyscf import __config__
libfci = lib.load_library('libfci')
def contract_2e(eri, civec_strs, norb, nelec, link_index=None):
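    # Apply the two-electron Hamiltonian to a selected-CI vector.  The same
    # C kernel handles the alpha-alpha and beta-beta terms (the beta part
    # acts on the transposed CI matrix), while the mixed alpha-beta term
    # also absorbs the effective one-electron piece h_ps split over spins.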
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
link_index = _all_linkstr_index(ci_strs, norb, nelec)
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
na, nlinka = cd_indexa.shape[:2]
nb, nlinkb = cd_indexb.shape[:2]
eri = ao2mo.restore(1, eri, norb)
eri1 = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
idx,idy = numpy.tril_indices(norb, -1)
idx = idx * norb + idy
eri1 = lib.take_2d(eri1.reshape(norb**2,-1), idx, idx) * 2
fcivec = ci_coeff.reshape(na,nb)
# (bb|bb)
if nelec[1] > 1:
mb, mlinkb = dd_indexb.shape[:2]
fcivecT = lib.transpose(fcivec)
ci1T = numpy.zeros((nb,na))
libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
ci1T.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(nb), ctypes.c_int(na),
ctypes.c_int(mb), ctypes.c_int(mlinkb),
dd_indexb.ctypes.data_as(ctypes.c_void_p))
ci1 = lib.transpose(ci1T, out=fcivecT)
else:
ci1 = numpy.zeros_like(fcivec)
# (aa|aa)
if nelec[0] > 1:
ma, mlinka = dd_indexa.shape[:2]
libfci.SCIcontract_2e_aaaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(ma), ctypes.c_int(mlinka),
dd_indexa.ctypes.data_as(ctypes.c_void_p))
h_ps = numpy.einsum('pqqs->ps', eri)
eri1 = eri * 2
for k in range(norb):
eri1[:,:,k,k] += h_ps/nelec[0]
eri1[k,k,:,:] += h_ps/nelec[1]
eri1 = ao2mo.restore(4, eri1, norb)
# (bb|aa)
libfci.SCIcontract_2e_bbaa(eri1.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
cd_indexa.ctypes.data_as(ctypes.c_void_p),
cd_indexb.ctypes.data_as(ctypes.c_void_p))
return _as_SCIvector(ci1.reshape(ci_coeff.shape), ci_strs)
def select_strs(myci, eri, eri_pq_max, civec_max, strs, norb, nelec):
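    # Screen candidate determinant strings: the C kernel generates strings
    # connected to `strs` by excitations and keeps those whose estimated
    # coupling (integral magnitude times CI weight) exceeds
    # myci.select_cutoff (hedged description of the SCIselect_strs heuristic).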
strs = numpy.asarray(strs, dtype=numpy.int64)
nstrs = len(strs)
nvir = norb - nelec
strs_add = numpy.empty((nstrs*(nelec*nvir)**2//4), dtype=numpy.int64)
libfci.SCIselect_strs.restype = ctypes.c_int
nadd = libfci.SCIselect_strs(strs_add.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
eri_pq_max.ctypes.data_as(ctypes.c_void_p),
civec_max.ctypes.data_as(ctypes.c_void_p),
ctypes.c_double(myci.select_cutoff),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
strs_add = sorted(set(strs_add[:nadd]) - set(strs))
return numpy.asarray(strs_add, dtype=numpy.int64)
def enlarge_space(myci, civec_strs, eri, norb, nelec):
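    # Expand the selected-CI space: keep strings whose CI coefficients exceed
    # ci_coeff_cutoff, add new strings selected through large two-electron
    # integrals, then re-embed the old CI coefficients into the bigger space.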
if isinstance(civec_strs, (tuple, list)):
nelec, (strsa, strsb) = _unpack(civec_strs[0], nelec)[1:]
ci_coeff = lib.asarray(civec_strs)
else:
ci_coeff, nelec, (strsa, strsb) = _unpack(civec_strs, nelec)
na = len(strsa)
nb = len(strsb)
ci0 = ci_coeff.reshape(-1,na,nb)
civec_a_max = lib.norm(ci0, axis=2).max(axis=0)
civec_b_max = lib.norm(ci0, axis=1).max(axis=0)
ci_aidx = numpy.where(civec_a_max > myci.ci_coeff_cutoff)[0]
ci_bidx = numpy.where(civec_b_max > myci.ci_coeff_cutoff)[0]
civec_a_max = civec_a_max[ci_aidx]
civec_b_max = civec_b_max[ci_bidx]
strsa = strsa[ci_aidx]
strsb = strsb[ci_bidx]
eri = ao2mo.restore(1, eri, norb)
eri_pq_max = abs(eri.reshape(norb**2,-1)).max(axis=1).reshape(norb,norb)
strsa_add = select_strs(myci, eri, eri_pq_max, civec_a_max, strsa, norb, nelec[0])
strsb_add = select_strs(myci, eri, eri_pq_max, civec_b_max, strsb, norb, nelec[1])
strsa = numpy.append(strsa, strsa_add)
strsb = numpy.append(strsb, strsb_add)
aidx = numpy.argsort(strsa)
bidx = numpy.argsort(strsb)
ci_strs = (strsa[aidx], strsb[bidx])
aidx = numpy.where(aidx < len(ci_aidx))[0]
bidx = numpy.where(bidx < len(ci_bidx))[0]
ma = len(strsa)
mb = len(strsb)
cs = []
for i in range(ci0.shape[0]):
ci1 = numpy.zeros((ma,mb))
tmp = lib.take_2d(ci0[i], ci_aidx, ci_bidx)
lib.takebak_2d(ci1, tmp, aidx, bidx)
cs.append(_as_SCIvector(ci1, ci_strs))
if not isinstance(civec_strs, (tuple, list)) and civec_strs.ndim < 3:
cs = cs[0]
return cs
def cre_des_linkstr(strs, norb, nelec, tril=False):
'''Given intermediates, the link table to generate input strs
'''
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
link_index = numpy.zeros((nstrs,nelec+nelec*nvir,4), dtype=numpy.int32)
libfci.SCIcre_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nstrs),
ctypes.c_int(nelec),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(tril))
return link_index
def cre_des_linkstr_tril(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
return cre_des_linkstr(strs, norb, nelec, True)
def des_des_linkstr(strs, norb, nelec, tril=False):
'''Given intermediates, the link table to generate input strs
'''
if nelec < 2:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter1 = numpy.empty((nstrs*nelec), dtype=numpy.int64)
libfci.SCIdes_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIdes_uniq_strs(inter1.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter1 = numpy.asarray(sorted(set(inter1[:ninter])), dtype=numpy.int64)
ninter = len(inter1)
inter = numpy.empty((ninter*nelec), dtype=numpy.int64)
ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
inter1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec-1),
ctypes.c_int(ninter))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
nvir += 2
link_index = numpy.zeros((ninter,nvir*nvir,4), dtype=numpy.int32)
libfci.SCIdes_des_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(tril))
return link_index
def des_des_linkstr_tril(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
return des_des_linkstr(strs, norb, nelec, True)
def gen_des_linkstr(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
if nelec < 1:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter = numpy.empty((nstrs*nelec), dtype=numpy.int64)
libfci.SCIdes_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIdes_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
nvir += 1
link_index = numpy.zeros((ninter,nvir,4), dtype=numpy.int32)
libfci.SCIdes_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p))
return link_index
def gen_cre_linkstr(strs, norb, nelec):
'''Given intermediates, the link table to generate input strs
'''
if nelec == norb:
return None
strs = numpy.asarray(strs, dtype=numpy.int64)
nvir = norb - nelec
nstrs = len(strs)
inter = numpy.empty((nstrs*nvir), dtype=numpy.int64)
libfci.SCIcre_uniq_strs.restype = ctypes.c_int
ninter = libfci.SCIcre_uniq_strs(inter.ctypes.data_as(ctypes.c_void_p),
strs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs))
inter = numpy.asarray(sorted(set(inter[:ninter])), dtype=numpy.int64)
ninter = len(inter)
link_index = numpy.zeros((ninter,nelec+1,4), dtype=numpy.int32)
libfci.SCIcre_linkstr(link_index.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(nelec),
ctypes.c_int(nstrs), ctypes.c_int(ninter),
strs.ctypes.data_as(ctypes.c_void_p),
inter.ctypes.data_as(ctypes.c_void_p))
return link_index
def make_hdiag(h1e, eri, ci_strs, norb, nelec):
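    # Diagonal Hamiltonian elements <I|H|I> for the selected determinant
    # pairs, built from h1e plus Coulomb/exchange diagonals via the C kernel.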
ci_coeff, nelec, ci_strs = _unpack(None, nelec, ci_strs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
hdiag = numpy.empty(na*nb)
h1e = numpy.asarray(h1e, order='C')
eri = ao2mo.restore(1, eri, norb)
jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
occslsta = cistring._strs2occslst(ci_strs[0], norb)
occslstb = cistring._strs2occslst(ci_strs[1], norb)
libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nelec[0]), ctypes.c_int(nelec[1]),
occslsta.ctypes.data_as(ctypes.c_void_p),
occslstb.ctypes.data_as(ctypes.c_void_p))
return hdiag
def kernel_fixed_space(myci, h1e, eri, norb, nelec, ci_strs, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None,
max_memory=None, verbose=None, ecore=0, **kwargs):
log = logger.new_logger(myci, verbose)
if tol is None: tol = myci.conv_tol
if lindep is None: lindep = myci.lindep
if max_cycle is None: max_cycle = myci.max_cycle
if max_space is None: max_space = myci.max_space
if max_memory is None: max_memory = myci.max_memory
if nroots is None: nroots = myci.nroots
if myci.verbose >= logger.WARN:
myci.check_sanity()
nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
ci0, nelec, ci_strs = _unpack(ci0, nelec, ci_strs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
h2e = ao2mo.restore(1, h2e, norb)
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
if isinstance(ci0, _SCIvector):
if ci0.size == na*nb:
ci0 = [ci0.ravel()]
else:
ci0 = [x.ravel() for x in ci0]
else:
ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
def hop(c):
hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
return hc.reshape(-1)
precond = lambda x, e, *args: x/(hdiag-e+1e-4)
#e, c = lib.davidson(hop, ci0, precond, tol=myci.conv_tol)
e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
if nroots > 1:
return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
else:
return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel_float_space(myci, h1e, eri, norb, nelec, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None,
max_memory=None, verbose=None, ecore=0, **kwargs):
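    # Strategy: diagonalize in the current selected space with a loosened
    # tolerance, enlarge the space from the resulting CI vector, and repeat
    # until the space saturates or the energy change falls below tolerance;
    # a final diagonalization at full tolerance produces the reported roots.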
log = logger.new_logger(myci, verbose)
if tol is None: tol = myci.conv_tol
if lindep is None: lindep = myci.lindep
if max_cycle is None: max_cycle = myci.max_cycle
if max_space is None: max_space = myci.max_space
if max_memory is None: max_memory = myci.max_memory
if nroots is None: nroots = myci.nroots
if myci.verbose >= logger.WARN:
myci.check_sanity()
nelec = direct_spin1._unpack_nelec(nelec, myci.spin)
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
h2e = ao2mo.restore(1, h2e, norb)
# TODO: initial guess from CISD
if isinstance(ci0, _SCIvector):
if ci0.size == len(ci0._strs[0])*len(ci0._strs[1]):
ci0 = [ci0.ravel()]
else:
ci0 = [x.ravel() for x in ci0]
else:
ci_strs = (numpy.asarray([int('1'*nelec[0], 2)]),
numpy.asarray([int('1'*nelec[1], 2)]))
ci0 = _as_SCIvector(numpy.ones((1,1)), ci_strs)
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
if ci0.size < nroots:
log.warn('''
Selected-CI space generated from HF ground state (by double exciting) is not enough for excited states.
HOMO->LUMO excitations are included in the initial guess.
NOTE: This may introduce excited states of different symmetry.\n''')
corea = '1' * (nelec[0]-1)
coreb = '1' * (nelec[1]-1)
ci_strs = (numpy.asarray([int('1'+corea, 2), int('10'+corea, 2)]),
numpy.asarray([int('1'+coreb, 2), int('10'+coreb, 2)]))
ci0 = _as_SCIvector(numpy.ones((2,2)), ci_strs)
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
if ci0.size < nroots:
raise RuntimeError('Not enough selected-CI space for %d states' % nroots)
ci_strs = ci0._strs
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
ci0 = myci.get_init_guess(ci_strs, norb, nelec, nroots, hdiag)
def hop(c):
hc = myci.contract_2e(h2e, _as_SCIvector(c, ci_strs), norb, nelec, link_index)
return hc.ravel()
precond = lambda x, e, *args: x/(hdiag-e+myci.level_shift)
namax = cistring.num_strings(norb, nelec[0])
nbmax = cistring.num_strings(norb, nelec[1])
e_last = 0
float_tol = myci.start_tol
tol_decay_rate = myci.tol_decay_rate
conv = False
for icycle in range(norb):
ci_strs = ci0[0]._strs
float_tol = max(float_tol*tol_decay_rate, tol*1e2)
log.debug('cycle %d ci.shape %s float_tol %g',
icycle, (len(ci_strs[0]), len(ci_strs[1])), float_tol)
ci0 = [c.ravel() for c in ci0]
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
#e, ci0 = lib.davidson(hop, ci0.reshape(-1), precond, tol=float_tol)
e, ci0 = myci.eig(hop, ci0, precond, tol=float_tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
if nroots > 1:
ci0 = [_as_SCIvector(c, ci_strs) for c in ci0]
de, e_last = min(e)-e_last, min(e)
log.info('cycle %d E = %s dE = %.8g', icycle, e+ecore, de)
else:
ci0 = [_as_SCIvector(ci0, ci_strs)]
de, e_last = e-e_last, e
log.info('cycle %d E = %.15g dE = %.8g', icycle, e+ecore, de)
if ci0[0].shape == (namax,nbmax) or abs(de) < tol*1e3:
conv = True
break
last_ci0_size = float(len(ci_strs[0])), float(len(ci_strs[1]))
ci0 = myci.enlarge_space(ci0, h2e, norb, nelec)
na = len(ci0[0]._strs[0])
nb = len(ci0[0]._strs[1])
if ((.99 < na/last_ci0_size[0] < 1.01) and
(.99 < nb/last_ci0_size[1] < 1.01)):
conv = True
break
ci_strs = ci0[0]._strs
log.debug('Extra CI in selected space %s', (len(ci_strs[0]), len(ci_strs[1])))
ci0 = [c.ravel() for c in ci0]
link_index = _all_linkstr_index(ci_strs, norb, nelec)
hdiag = myci.make_hdiag(h1e, eri, ci_strs, norb, nelec)
e, c = myci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
max_cycle=max_cycle, max_space=max_space, nroots=nroots,
max_memory=max_memory, verbose=log, **kwargs)
na = len(ci_strs[0])
nb = len(ci_strs[1])
if nroots > 1:
for i, ei in enumerate(e+ecore):
log.info('Selected CI state %d E = %.15g', i, ei)
return e+ecore, [_as_SCIvector(ci.reshape(na,nb),ci_strs) for ci in c]
else:
log.info('Selected CI E = %.15g', e+ecore)
return e+ecore, _as_SCIvector(c.reshape(na,nb), ci_strs)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
select_cutoff=1e-3, ci_coeff_cutoff=1e-3, ecore=0, **kwargs):
return direct_spin1._kfactory(SelectedCI, h1e, eri, norb, nelec, ci0,
level_shift, tol, lindep, max_cycle,
max_space, nroots, davidson_only,
pspace_size, select_cutoff=select_cutoff,
ci_coeff_cutoff=ci_coeff_cutoff, ecore=ecore,
**kwargs)
def make_rdm1s(civec_strs, norb, nelec, link_index=None):
    r'''Spin separated 1-particle density matrices.
The return values include two density matrices: (alpha,alpha), (beta,beta)
dm1[p,q] = <q^\dagger p>
The convention is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
rdm1a = rdm.make_rdm1_spin1('FCImake_rdm1a', ci_coeff, ci_coeff,
norb, nelec, (cd_indexa,cd_indexb))
rdm1b = rdm.make_rdm1_spin1('FCImake_rdm1b', ci_coeff, ci_coeff,
norb, nelec, (cd_indexa,cd_indexb))
return rdm1a, rdm1b
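# Illustrative sketch (not part of the original module): with the convention
# documented above, the one-electron energy follows from contracting the
# spin-traced 1-RDM with a one-electron Hamiltonian h1e.
def _example_e1_from_rdm1(h1e, civec_strs, norb, nelec):
    dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec)
    return numpy.einsum('pq,qp', h1e, dm1a + dm1b)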
def make_rdm1(civec_strs, norb, nelec, link_index=None):
r'''Spin-traced 1-particle density matrix.
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
The convention is based on McWeeney's book, Eq (5.4.20)
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
rdm1a, rdm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
return rdm1a + rdm1b
# dm[p,q,r,s] = <|p^+ q r^+ s|>
def make_rdm2s(civec_strs, norb, nelec, link_index=None, **kwargs):
r'''Spin separated 2-particle density matrices.
The return values include three density matrices:
(alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta), (beta,beta,beta,beta)
2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`
'''
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
dd_indexa = des_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
dd_indexb = des_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
na, nlinka = cd_indexa.shape[:2]
nb, nlinkb = cd_indexb.shape[:2]
fcivec = ci_coeff.reshape(na,nb)
# (bb|aa) and (aa|bb)
dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
norb, nelec, (cd_indexa,cd_indexb), 0)[1]
# (aa|aa)
dm2aa = numpy.zeros([norb]*4)
if nelec[0] > 1:
ma, mlinka = dd_indexa.shape[:2]
libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
dm2aa.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(ma), ctypes.c_int(mlinka),
dd_indexa.ctypes.data_as(ctypes.c_void_p))
# (bb|bb)
dm2bb = numpy.zeros([norb]*4)
if nelec[1] > 1:
mb, mlinkb = dd_indexb.shape[:2]
fcivecT = lib.transpose(fcivec)
libfci.SCIrdm2_aaaa(libfci.SCIrdm2kern_aaaa,
dm2bb.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
fcivecT.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(nb), ctypes.c_int(na),
ctypes.c_int(mb), ctypes.c_int(mlinkb),
dd_indexb.ctypes.data_as(ctypes.c_void_p))
return dm2aa, dm2ab, dm2bb
def make_rdm2(civec_strs, norb, nelec, link_index=None, **kwargs):
r'''Spin-traced two-particle density matrix.
2pdm[p,q,r,s] = :math:`\langle p_\alpha^\dagger r_\alpha^\dagger s_\alpha q_\alpha\rangle +
\langle p_\beta^\dagger r_\alpha^\dagger s_\alpha q_\beta\rangle +
\langle p_\alpha^\dagger r_\beta^\dagger s_\beta q_\alpha\rangle +
\langle p_\beta^\dagger r_\beta^\dagger s_\beta q_\beta\rangle`.
'''
dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
dm2aa += dm2bb
dm2aa += dm2ab
dm2aa += dm2ab.transpose(2,3,0,1)
return dm2aa
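# Illustrative sketch: assuming full 4-index two-electron integrals (pq|rs) in
# chemists' notation, the spin-traced 2-RDM above gives the two-electron energy
# as E2 = 1/2 * sum_pqrs (pq|rs) * dm2[p,q,r,s].
def _example_e2_from_rdm2(eri4, civec_strs, norb, nelec):
    dm2 = make_rdm2(civec_strs, norb, nelec)
    return .5 * numpy.einsum('pqrs,pqrs', eri4, dm2)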
def trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index=None):
r'''Spin separated transition 1-particle density matrices.
See also function :func:`make_rdm1s`
1pdm[p,q] = :math:`\langle q^\dagger p \rangle`
'''
cibra, nelec, ci_strs = _unpack(cibra_strs, nelec)
ciket, nelec1, ci_strs1 = _unpack(ciket_strs, nelec)
assert(all(ci_strs[0] == ci_strs1[0]) and
all(ci_strs[1] == ci_strs1[1]))
if link_index is None:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelec[1])
else:
cd_indexa, dd_indexa, cd_indexb, dd_indexb = link_index
rdm1a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
norb, nelec, (cd_indexa,cd_indexb))
rdm1b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
norb, nelec, (cd_indexa,cd_indexb))
return rdm1a, rdm1b
def trans_rdm1(cibra_strs, ciket_strs, norb, nelec, link_index=None):
r'''Spin traced transition 1-particle density matrices.
See also function :func:`make_rdm1`
1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle
+ \langle q_\beta^\dagger p_\beta \rangle`
'''
rdm1a, rdm1b = trans_rdm1s(cibra_strs, ciket_strs, norb, nelec, link_index)
return rdm1a + rdm1b
def spin_square(civec_strs, norb, nelec):
    '''Spin square for RHF-FCI CI wfn only (obtained from a spin-degenerate
    Hamiltonian)'''
ci1 = contract_ss(civec_strs, norb, nelec)
ss = numpy.einsum('ij,ij->', civec_strs.reshape(ci1.shape), ci1)
s = numpy.sqrt(ss+.25) - .5
multip = s*2+1
return ss, multip
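# Illustrative check of the arithmetic above: recovering S and the multiplicity
# 2S+1 from an expectation value <S^2> = S(S+1).
def _example_spin_labels(ss):
    s = numpy.sqrt(ss + .25) - .5
    return s, 2 * s + 1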
def contract_ss(civec_strs, norb, nelec):
r''' S^2 |\Psi\rangle
'''
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
strsa, strsb = ci_strs
neleca, nelecb = nelec
ci_coeff = ci_coeff.reshape(len(strsa),len(strsb))
def gen_map(fstr_index, strs, nelec, des=True):
a_index = fstr_index(strs, norb, nelec)
amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
if des:
for k, tab in enumerate(a_index):
sign = tab[:,3]
tab = tab[sign!=0]
amap[k,tab[:,1]] = tab[:,2:]
else:
for k, tab in enumerate(a_index):
sign = tab[:,3]
tab = tab[sign!=0]
amap[k,tab[:,0]] = tab[:,2:]
return amap
if neleca > 0:
ades = gen_map(gen_des_linkstr, strsa, neleca)
else:
ades = None
if nelecb > 0:
bdes = gen_map(gen_des_linkstr, strsb, nelecb)
else:
bdes = None
if neleca < norb:
acre = gen_map(gen_cre_linkstr, strsa, neleca, False)
else:
acre = None
if nelecb < norb:
bcre = gen_map(gen_cre_linkstr, strsb, nelecb, False)
else:
bcre = None
def trans(ci1, aindex, bindex):
if aindex is None or bindex is None:
return None
ma = len(aindex)
mb = len(bindex)
t1 = numpy.zeros((ma,mb))
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(ci_coeff, addra, addrb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
#: t1[addra.reshape(-1,1),addrb] += citmp
lib.takebak_2d(t1, citmp, maska, maskb)
for i in range(norb):
signa = aindex[:,i,1]
signb = bindex[:,i,1]
maska = numpy.where(signa!=0)[0]
maskb = numpy.where(signb!=0)[0]
addra = aindex[maska,i,0]
addrb = bindex[maskb,i,0]
citmp = lib.take_2d(t1, maska, maskb)
citmp *= signa[maska].reshape(-1,1)
citmp *= signb[maskb]
#: ci1[maska.reshape(-1,1), maskb] += citmp
lib.takebak_2d(ci1, citmp, addra, addrb)
ci1 = numpy.zeros_like(ci_coeff)
trans(ci1, ades, bcre) # S+*S-
trans(ci1, acre, bdes) # S-*S+
ci1 *= .5
ci1 += (neleca-nelecb)**2*.25*ci_coeff
return _as_SCIvector(ci1, ci_strs)
def to_fci(civec_strs, norb, nelec):
ci_coeff, nelec, ci_strs = _unpack(civec_strs, nelec)
addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
na = cistring.num_strings(norb, nelec[0])
nb = cistring.num_strings(norb, nelec[1])
ci0 = numpy.zeros((na,nb))
lib.takebak_2d(ci0, ci_coeff, addrsa, addrsb)
return ci0
def from_fci(fcivec, ci_strs, norb, nelec):
fcivec, nelec, ci_strs = _unpack(fcivec, nelec, ci_strs)
addrsa = [cistring.str2addr(norb, nelec[0], x) for x in ci_strs[0]]
addrsb = [cistring.str2addr(norb, nelec[1], x) for x in ci_strs[1]]
na = cistring.num_strings(norb, nelec[0])
nb = cistring.num_strings(norb, nelec[1])
fcivec = fcivec.reshape(na,nb)
civec = lib.take_2d(fcivec, addrsa, addrsb)
return _as_SCIvector(civec, ci_strs)
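# Illustrative sketch: to_fci/from_fci embed between the selected space and the
# full determinant space, so projecting an FCI vector onto a selected string
# list and embedding it back zeroes every determinant outside that list.
def _example_project_fci(fcivec, ci_strs, norb, nelec):
    sci = from_fci(fcivec, ci_strs, norb, nelec)
    return to_fci(sci, norb, nelec)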
class SelectedCI(direct_spin1.FCISolver):
ci_coeff_cutoff = getattr(__config__, 'fci_selected_ci_SCI_ci_coeff_cutoff', .5e-3)
select_cutoff = getattr(__config__, 'fci_selected_ci_SCI_select_cutoff', .5e-3)
conv_tol = getattr(__config__, 'fci_selected_ci_SCI_conv_tol', 1e-9)
start_tol = getattr(__config__, 'fci_selected_ci_SCI_start_tol', 3e-4)
tol_decay_rate = getattr(__config__, 'fci_selected_ci_SCI_tol_decay_rate', 0.3)
def __init__(self, mol=None):
direct_spin1.FCISolver.__init__(self, mol)
##################################################
# don't modify the following attributes, they are not input options
#self.converged = False
#self.ci = None
self._strs = None
keys = set(('ci_coeff_cutoff', 'select_cutoff', 'conv_tol',
'start_tol', 'tol_decay_rate'))
self._keys = self._keys.union(keys)
def dump_flags(self, verbose=None):
direct_spin1.FCISolver.dump_flags(self, verbose)
logger.info(self, 'ci_coeff_cutoff %g', self.ci_coeff_cutoff)
logger.info(self, 'select_cutoff %g', self.select_cutoff)
def contract_2e(self, eri, civec_strs, norb, nelec, link_index=None, **kwargs):
        # The argument civec_strs is a plain CI vector in FCISolver.contract_2e.
        # Save and patch self._strs to make this contract_2e function compatible
        # with FCISolver.contract_2e.
if getattr(civec_strs, '_strs', None) is not None:
self._strs = civec_strs._strs
else:
assert(civec_strs.size == len(self._strs[0])*len(self._strs[1]))
civec_strs = _as_SCIvector(civec_strs, self._strs)
return contract_2e(eri, civec_strs, norb, nelec, link_index)
def get_init_guess(self, ci_strs, norb, nelec, nroots, hdiag):
'''Initial guess is the single Slater determinant
'''
na = len(ci_strs[0])
nb = len(ci_strs[1])
ci0 = direct_spin1._get_init_guess(na, nb, nroots, hdiag)
return [_as_SCIvector(x, ci_strs) for x in ci0]
def make_hdiag(self, h1e, eri, ci_strs, norb, nelec):
return make_hdiag(h1e, eri, ci_strs, norb, nelec)
enlarge_space = enlarge_space
kernel = kernel_float_space
kernel_fixed_space = kernel_fixed_space
# def approx_kernel(self, h1e, eri, norb, nelec, ci0=None, link_index=None,
# tol=None, lindep=None, max_cycle=None,
# max_memory=None, verbose=None, **kwargs):
# ci_strs = getattr(ci0, '_strs', self._strs)
# return self.kernel_fixed_space(h1e, eri, norb, nelec, ci_strs,
# ci0, link_index, tol, lindep, 6,
# max_memory, verbose, **kwargs)
@lib.with_doc(spin_square.__doc__)
def spin_square(self, civec_strs, norb, nelec):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
return spin_square(_as_SCIvector_if_not(civec_strs, self._strs), norb, nelec)
def large_ci(self, civec_strs, norb, nelec, tol=.1, return_strs=True):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
ci, _, (strsa, strsb) = _unpack(civec_strs, nelec, self._strs)
addra, addrb = numpy.where(abs(ci) > tol)
if return_strs:
strsa = [bin(x) for x in strsa[addra]]
strsb = [bin(x) for x in strsb[addrb]]
return list(zip(ci[addra,addrb], strsa, strsb))
else:
occslsta = cistring._strs2occslst(strsa[addra], norb)
occslstb = cistring._strs2occslst(strsb[addrb], norb)
return list(zip(ci[addra,addrb], occslsta, occslstb))
def contract_ss(self, fcivec, norb, nelec):
return contract_ss(fcivec, norb, nelec)
@lib.with_doc(make_rdm1s.__doc__)
def make_rdm1s(self, civec_strs, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm1s(civec_strs, norb, nelec, link_index)
@lib.with_doc(make_rdm1.__doc__)
def make_rdm1(self, civec_strs, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
rdm1a, rdm1b = self.make_rdm1s(civec_strs, norb, nelec, link_index)
return rdm1a + rdm1b
@lib.with_doc(make_rdm2s.__doc__)
def make_rdm2s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm2s(civec_strs, norb, nelec, link_index)
@lib.with_doc(make_rdm2.__doc__)
def make_rdm2(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
return make_rdm2(civec_strs, norb, nelec, link_index)
def make_rdm12s(self, civec_strs, norb, nelec, link_index=None, **kwargs):
neleca, nelecb = nelec = direct_spin1._unpack_nelec(nelec, self.spin)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
dm2aa, dm2ab, dm2bb = make_rdm2s(civec_strs, norb, nelec, link_index)
if neleca > 1 and nelecb > 1:
dm1a = numpy.einsum('iikl->kl', dm2aa) / (neleca-1)
dm1b = numpy.einsum('iikl->kl', dm2bb) / (nelecb-1)
else:
dm1a, dm1b = make_rdm1s(civec_strs, norb, nelec, link_index)
return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)
def make_rdm12(self, civec_strs, norb, nelec, link_index=None, **kwargs):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
nelec_tot = sum(nelec)
civec_strs = _as_SCIvector_if_not(civec_strs, self._strs)
dm2 = make_rdm2(civec_strs, norb, nelec, link_index)
if nelec_tot > 1:
dm1 = numpy.einsum('iikl->kl', dm2) / (nelec_tot-1)
else:
dm1 = make_rdm1(civec_strs, norb, nelec, link_index)
return dm1, dm2
@lib.with_doc(trans_rdm1s.__doc__)
def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
cibra = _as_SCIvector_if_not(cibra, self._strs)
ciket = _as_SCIvector_if_not(ciket, self._strs)
return trans_rdm1s(cibra, ciket, norb, nelec, link_index)
@lib.with_doc(trans_rdm1.__doc__)
def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
nelec = direct_spin1._unpack_nelec(nelec, self.spin)
cibra = _as_SCIvector_if_not(cibra, self._strs)
ciket = _as_SCIvector_if_not(ciket, self._strs)
return trans_rdm1(cibra, ciket, norb, nelec, link_index)
def gen_linkstr(self, norb, nelec, tril=True, spin=None, ci_strs=None):
if spin is None:
spin = self.spin
if ci_strs is None:
ci_strs = self._strs
neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
if tril:
cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, neleca)
dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, neleca)
cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelecb)
dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelecb)
else:
cd_indexa = cre_des_linkstr(ci_strs[0], norb, neleca)
dd_indexa = des_des_linkstr(ci_strs[0], norb, neleca)
cd_indexb = cre_des_linkstr(ci_strs[1], norb, nelecb)
dd_indexb = des_des_linkstr(ci_strs[1], norb, nelecb)
return cd_indexa, dd_indexa, cd_indexb, dd_indexb
SCI = SelectedCI
def _unpack(civec_strs, nelec, ci_strs=None, spin=None):
neleca, nelecb = direct_spin1._unpack_nelec(nelec, spin)
ci_strs = getattr(civec_strs, '_strs', ci_strs)
if ci_strs is not None:
strsa, strsb = ci_strs
strsa = numpy.asarray(strsa)
strsb = numpy.asarray(strsb)
ci_strs = (strsa, strsb)
return civec_strs, (neleca, nelecb), ci_strs
def _all_linkstr_index(ci_strs, norb, nelec):
cd_indexa = cre_des_linkstr_tril(ci_strs[0], norb, nelec[0])
dd_indexa = des_des_linkstr_tril(ci_strs[0], norb, nelec[0])
cd_indexb = cre_des_linkstr_tril(ci_strs[1], norb, nelec[1])
dd_indexb = des_des_linkstr_tril(ci_strs[1], norb, nelec[1])
return cd_indexa, dd_indexa, cd_indexb, dd_indexb
# numpy.ndarray does not allow attaching attributes. Subclass the
# numpy.ndarray class to tag the ._strs attribute
class _SCIvector(numpy.ndarray):
def __array_finalize__(self, obj):
self._strs = getattr(obj, '_strs', None)
# Whenever the contents of the array are modified (through a ufunc), the tag
# should expire. Overwrite the ufunc output to restore the plain ndarray type.
def __array_wrap__(self, out, context=None):
return numpy.ndarray.__array_wrap__(self, out, context).view(numpy.ndarray)
def _as_SCIvector(civec, ci_strs):
civec = civec.view(_SCIvector)
civec._strs = ci_strs
return civec
def _as_SCIvector_if_not(civec, ci_strs):
if getattr(civec, '_strs', None) is None:
civec = _as_SCIvector(civec, ci_strs)
return civec
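# Illustrative sketch: ufunc arithmetic on a _SCIvector returns a plain ndarray
# (__array_wrap__ deliberately drops the ._strs tag), so results that should
# stay in the selected space must be re-tagged explicitly.
def _example_retag(civec, ci_strs):
    scaled = civec * 2  # plain numpy.ndarray; the ._strs tag has expired
    return _as_SCIvector_if_not(scaled, ci_strs)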
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf.fci import spin_op
from pyscf.fci import addons
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
['H', ( 1., 2. , 3. )],
['H', ( 1., 2. , 4. )],
]
mol.basis = 'sto-3g'
mol.build()
m = scf.RHF(mol)
m.kernel()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
eri = ao2mo.kernel(m._eri, m.mo_coeff, compact=False)
eri = eri.reshape(norb,norb,norb,norb)
e1, c1 = kernel(h1e, eri, norb, nelec)
e2, c2 = direct_spin1.kernel(h1e, eri, norb, nelec)
print(e1, e1 - -11.894559902235565, 'diff to FCI', e1-e2)
print(c1.shape, c2.shape)
dm1_1 = make_rdm1(c1, norb, nelec)
dm1_2 = direct_spin1.make_rdm1(c2, norb, nelec)
print(abs(dm1_1 - dm1_2).sum())
dm2_1 = make_rdm2(c1, norb, nelec)
dm2_2 = direct_spin1.make_rdm12(c2, norb, nelec)[1]
print(abs(dm2_1 - dm2_2).sum())
myci = SelectedCI()
e, c = kernel_fixed_space(myci, h1e, eri, norb, nelec, c1._strs)
print(e - -11.894559902235565)
print(myci.large_ci(c1, norb, nelec))
print(myci.spin_square(c1, norb, nelec)[0] -
spin_op.spin_square0(to_fci(c1, norb, nelec), norb, nelec)[0])
myci = SelectedCI()
myci = addons.fix_spin_(myci)
e1, c1 = myci.kernel(h1e, eri, norb, nelec)
print(e1, e1 - -11.89467612053687)
print(myci.spin_square(c1, norb, nelec))
| [
"[email protected]"
] | |
98a5436bba2136ae470959f0588878666f9736e7 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/create_listener_quic_config_option.py | ea75a64289e0119f8980032cb31fa3a3892e5e4e | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,568 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateListenerQuicConfigOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'quic_listener_id': 'str',
'enable_quic_upgrade': 'bool'
}
attribute_map = {
'quic_listener_id': 'quic_listener_id',
'enable_quic_upgrade': 'enable_quic_upgrade'
}
def __init__(self, quic_listener_id=None, enable_quic_upgrade=None):
"""CreateListenerQuicConfigOption
The model defined in huaweicloud sdk
        :param quic_listener_id: ID of the QUIC listener associated with this listener. The specified listener ID must already exist and its protocol type must be QUIC. It must not be null, otherwise it conflicts with enable_quic_upgrade. [QUIC is not supported.](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
        :type quic_listener_id: str
        :param enable_quic_upgrade: Whether QUIC upgrade is enabled. True: QUIC upgrade is enabled; False: QUIC upgrade is disabled (default). QUIC upgrade lets an HTTPS listener be upgraded to a QUIC listener. [QUIC is not supported.](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
        :type enable_quic_upgrade: bool
"""
self._quic_listener_id = None
self._enable_quic_upgrade = None
self.discriminator = None
self.quic_listener_id = quic_listener_id
if enable_quic_upgrade is not None:
self.enable_quic_upgrade = enable_quic_upgrade
@property
def quic_listener_id(self):
"""Gets the quic_listener_id of this CreateListenerQuicConfigOption.
        ID of the QUIC listener associated with this listener. The specified listener ID must already exist and its protocol type must be QUIC. It must not be null, otherwise it conflicts with enable_quic_upgrade. [QUIC is not supported.](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:return: The quic_listener_id of this CreateListenerQuicConfigOption.
:rtype: str
"""
return self._quic_listener_id
@quic_listener_id.setter
def quic_listener_id(self, quic_listener_id):
"""Sets the quic_listener_id of this CreateListenerQuicConfigOption.
        ID of the QUIC listener associated with this listener. The specified listener ID must already exist and its protocol type must be QUIC. It must not be null, otherwise it conflicts with enable_quic_upgrade. [QUIC is not supported.](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:param quic_listener_id: The quic_listener_id of this CreateListenerQuicConfigOption.
:type quic_listener_id: str
"""
self._quic_listener_id = quic_listener_id
@property
def enable_quic_upgrade(self):
"""Gets the enable_quic_upgrade of this CreateListenerQuicConfigOption.
        Whether QUIC upgrade is enabled. True: QUIC upgrade is enabled; False: QUIC upgrade is disabled (default). QUIC upgrade lets an HTTPS listener be upgraded to a QUIC listener. [QUIC is not supported.](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:return: The enable_quic_upgrade of this CreateListenerQuicConfigOption.
:rtype: bool
"""
return self._enable_quic_upgrade
@enable_quic_upgrade.setter
def enable_quic_upgrade(self, enable_quic_upgrade):
"""Sets the enable_quic_upgrade of this CreateListenerQuicConfigOption.
        Whether QUIC upgrade is enabled. True: QUIC upgrade is enabled; False: QUIC upgrade is disabled (default). QUIC upgrade lets an HTTPS listener be upgraded to a QUIC listener. [QUIC is not supported.](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:param enable_quic_upgrade: The enable_quic_upgrade of this CreateListenerQuicConfigOption.
:type enable_quic_upgrade: bool
"""
self._enable_quic_upgrade = enable_quic_upgrade
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateListenerQuicConfigOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
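# Usage sketch (illustrative, not generated code): enabling QUIC upgrade on a
# listener. Alternatively, pass quic_listener_id with the ID of an existing
# QUIC listener instead of setting the upgrade flag.
def _example_quic_option():
    return CreateListenerQuicConfigOption(enable_quic_upgrade=True)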
| [
"[email protected]"
] | |
2bf79220a4a605d9b665cad56a6c298e21d55040 | 98beeffab0570eb7e4bd2785fc195658e18aa6dd | /SRC/common/IO/progressbar_delay.py | cfb2f4c580665e91b96fc9caa3cc476d6de797c5 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | shkeshavarz/OOF2 | 27f59bb04775b76ad250ecfd76118b3760647bba | 0f69f535d040875354cd34e8bbedeae142ff09a3 | refs/heads/master | 2021-01-15T15:32:10.713469 | 2016-01-13T14:44:20 | 2016-01-13T14:44:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # -*- python -*-
# $RCSfile: progressbar_delay.py,v $
# $Revision: 1.22 $
# $Author: langer $
# $Date: 2011/02/01 16:38:56 $
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
# Time, in milliseconds, between the time that a progressbar object is
# created and the time that it is installed in the ActivityViewer
# window.
delay = 2000
# Time in milliseconds between progress bar updates.
period = 200
def set_delay(menuitem, milliseconds):
global delay
delay = milliseconds
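# The menuitem argument follows the menu-callback signature used elsewhere in
# OOF2 and is unused here, so the setter can also be invoked directly,
# e.g. set_delay(None, 5000).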
| [
"[email protected]"
] | |
698ce85576475ac479f25c8050a6685b8c37d9f0 | 9f8ce7b4b3fd8b2d0be51e559945feded81fb0b1 | /negotiator2/__init__.py | 38de5a17832bb501a1dcbfee6bd6900685236cad | [] | no_license | zimeon/negotiator2 | bf82d092492d11435a4db9bbf9f65211cd787f74 | 33dee19cd9fcf5db7cc6872c6608843f3bf1e9c8 | refs/heads/master | 2021-01-16T23:22:55.504372 | 2018-02-20T13:52:48 | 2018-02-20T13:52:48 | 95,739,871 | 0 | 0 | null | 2017-11-03T20:17:22 | 2017-06-29T04:53:53 | Python | UTF-8 | Python | false | false | 286 | py | """Imports for negotiator2."""
__version__ = '2.1.1'
from .negotiator import AcceptParameters, ContentType, Language, ContentNegotiator
from .memento import BadTimeMap, TimeMap, memento_parse_datetime, memento_datetime_string
from .util import conneg_on_accept, negotiate_on_datetime
| [
"[email protected]"
] | |
57b38b59e6438c0800acb06a2fcf6c7d6f16fc8f | e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488 | /thehive/komand_thehive/actions/get_case/schema.py | 4fd641e02f199012fe05d6e6f2d54aea73510dcd | [
"MIT"
] | permissive | OSSSP/insightconnect-plugins | ab7c77f91c46bd66b10db9da1cd7571dfc048ab7 | 846758dab745170cf1a8c146211a8bea9592e8ff | refs/heads/master | 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 | MIT | 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null | UTF-8 | Python | false | false | 3,744 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Retrieve a case by ID"
class Input:
ID = "id"
class Output:
CASE = "case"
class GetCaseInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "string",
"title": "Case ID",
"description": "Case ID e.g. AV_ajI_oYMfcbXhqb9tS",
"order": 1
}
},
"required": [
"id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetCaseOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"case": {
"$ref": "#/definitions/case",
"title": "Case",
"description": "Get case output",
"order": 1
}
},
"required": [
"case"
],
"definitions": {
"case": {
"type": "object",
"title": "case",
"properties": {
"_type": {
"type": "string",
"title": "Type",
"description": "Case type",
"order": 3
},
"caseId": {
"type": "integer",
"title": "Case ID e.g. AV_ajI_oYMfcbXhqb9tS",
"description": "Case ID",
"order": 8
},
"createdAt": {
"type": "integer",
"title": "Created At",
"description": "Created at",
"order": 16
},
"createdBy": {
"type": "string",
"title": "Created By",
"description": "Case created by",
"order": 12
},
"customFields": {
"type": "object",
"title": "Custom Fields",
"description": "Case custom fields",
"order": 14
},
"description": {
"type": "string",
"title": "Description",
"order": 17
},
"flag": {
"type": "boolean",
"title": "Flag",
"description": "Case flags",
"order": 10
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 15
},
"metrics": {
"type": "object",
"title": "Metrics",
"description": "Case metrics",
"order": 9
},
"owner": {
"type": "string",
"title": "Owner",
"description": "Case owner",
"order": 13
},
"severity": {
"type": "integer",
"title": "Severity",
"description": "Case severity",
"order": 5
},
"startDate": {
"type": "integer",
"title": "Start Date",
"description": "Case start date",
"order": 2
},
"status": {
"type": "string",
"title": "Status",
"description": "Case status",
"order": 1
},
"tags": {
"type": "array",
"title": "Tags",
"description": "Case tags",
"items": {
"type": "string"
},
"order": 6
},
"title": {
"type": "string",
"title": "Title",
"description": "Case title",
"order": 7
},
"tlp": {
"type": "integer",
"title": "TLP",
"description": "Traffic Light Protocol level",
"order": 4
},
"user": {
"type": "string",
"title": "User",
"description": "Case user",
"order": 11
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| [
"[email protected]"
] | |
f5b4ce49234aa51d775d4794255d26f32726d67e | ce5ce3764e75774c0b7eab47893987b9f311b1b9 | /.history/moments/models_20210605234329.py | 2a69731e929875c9c2fe37b29fe4471a44af790c | [] | no_license | iSecloud/django-wechat | 410fb8a23b50dc2343c2f0167bbae560bf6e9694 | faaff9bb7f9454a63b2f8411d3577169b470baad | refs/heads/main | 2023-05-15T06:53:16.252422 | 2021-06-07T14:00:35 | 2021-06-07T14:00:35 | 374,685,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class WeChatUser(models.Model):
user = models.OneToOneField(User, models.CASCADE)
motto = models.CharField(max_length=100, null=True, blank=True)
pic = models.CharField(max_length=50, null=True, blank=True)
region = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return self.user.username
class Status(models.Model):
user = models.ForeignKey(WeChatUser, models.CASCADE)
text = models.CharField(max_length=280)
pic = models.CharField(max_length=100, null=True, blank=True)
pub_time = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.text
class Meta:
ordering = ['-id']
# Data model for replies ("comments") and likes on a status
class Reply(models.Model):
status = models.ForeignKey(Status, models.CASCADE)
author = models.CharField(max_length=100)
type = models.CharField(max_length=20, choices=[("0", "like"), ("1", "comment")])
    text = models.CharField(max_length=300, null=True, blank=True)  # allow empty text (e.g. for likes)
at_person = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
at_person_name = ""
if self.at_person:
at_person_name = "@{}".format(self.at_person)
return "{}{} says {}".format(self.author, at_person_name, self.text)
| [
"[email protected]"
] | |
da954a0ed8caf750124be9eb6582ad919fab6903 | 258c045a25c2a4d2027c5510272b2e40fb7938ca | /ellipse_polaire.py | 680918cb63cc02de291149e3ce3fe6c1e7f16f3c | [
"MIT"
] | permissive | harryturr/electron-charging-rings-detector | 5395791aba2ce2264659b49c9ce4ad14c29c896a | 7eeefcf2c2be7aba56daf965fe80727887b01eb7 | refs/heads/master | 2022-07-07T08:27:15.412680 | 2019-10-29T23:58:57 | 2019-10-29T23:58:57 | 218,405,876 | 0 | 0 | MIT | 2022-06-21T23:20:08 | 2019-10-29T23:54:51 | OpenEdge ABL | UTF-8 | Python | false | false | 3,696 | py | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import features_automatic_detection
nombre_angle = 1000
features_automatic_detection.largeur_max = 256
features_automatic_detection.hauteur_max = 256
def ellipse_conversion(liste):
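    # Solve for the rotation (alpha, beta) = (cos t, sin t) that removes the
    # conic's cross term; degenerate cases fall back to the identity [1, 0].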
coeff_poly_0 = -4 * liste[1] ** 2 - (liste[0] - liste[2]) ** 2
coeff_poly_1 = (liste[0] - liste[2]) ** 2 + 4 * liste[1] ** 2
coeff_poly_2 = -liste[1] ** 2
cool = np.roots([coeff_poly_0, coeff_poly_1, coeff_poly_2])
bons_beta = []
if len(cool) == 0:
return [1, 0]
else:
if cool[0] >= 0:
beta_reel_0 = np.sqrt(cool[0])
beta_reel_1 = -np.sqrt(cool[0])
bons_beta.append(beta_reel_0)
bons_beta.append(beta_reel_1)
if cool[1] >= 0:
beta_reel_2 = np.sqrt(cool[1])
beta_reel_3 = -np.sqrt(cool[1])
bons_beta.append(beta_reel_2)
bons_beta.append(beta_reel_3)
index = bons_beta[0]
absol = abs(
(liste[0] - liste[2]) * np.sqrt(1 - bons_beta[0] ** 2) * bons_beta[0]
+ liste[1] * (1 - 2 * bons_beta[0] ** 2)
)
n = len(bons_beta)
for p in range(n):
if abs(bons_beta[p]):
value = (liste[0] - liste[2]) * np.sqrt(
1 - bons_beta[p] ** 2
) * bons_beta[p] + liste[1] * (1 - 2 * bons_beta[p] ** 2)
absolu = abs(value)
if absolu < absol:
absol = absolu
index = bons_beta[p]
return [np.sqrt(1 - index ** 2), index]
def changement_de_coordonnées_ellipse(liste):
[alpha, beta] = ellipse_conversion(liste)
new_a = liste[0] * alpha ** 2 + liste[2] * beta ** 2 - 2 * liste[1] * alpha * beta
new_b = 0
new_c = liste[2] * alpha ** 2 + liste[0] * beta ** 2 + 2 * liste[1] * alpha * beta
new_d = liste[3] * alpha - liste[4] * beta
new_e = liste[4] * alpha + liste[3] * beta
new_f = liste[5]
new_coordonnees = [new_a, new_b, new_c, new_d, new_e, new_f]
return new_coordonnees
def polaire_apres_rotation(liste):
new = changement_de_coordonnées_ellipse(liste)
K = new[3] ** 2 / new[0] + new[4] ** 2 / new[2] - new[5]
rayon_x = np.sqrt(K / new[0])
rayon_y = np.sqrt(K / new[2])
centre_x = -new[3] / new[0]
centre_y = -new[4] / new[2]
[alpha, beta] = ellipse_conversion(liste)
ellipse_polaire = [rayon_x, rayon_y, centre_x, centre_y, alpha, beta]
return ellipse_polaire
def tracer_ellipse(liste):
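    # Sample the ellipse at `nombre_angle` angles in its principal axes, rotate
    # back to image coordinates, and keep integer pixels inside the 256x256 frame.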
[rayon_x, rayon_y, centre_x, centre_y, alpha, beta] = polaire_apres_rotation(liste)
liste = []
for t in range(nombre_angle):
cos = np.cos(2 * np.pi * t / nombre_angle)
sin = np.sin(2 * np.pi * t / nombre_angle)
[grand_X, grand_Y] = [centre_x + rayon_x * cos, centre_y + rayon_y * sin]
[x, y] = [alpha * grand_X + beta * grand_Y, -beta * grand_X + alpha * grand_Y]
[int_x, int_y] = [int(x), int(y)]
if int_x >= 0 and int_x < 256 and int_y >= 0 and int_y < 256:
liste.append([int_x, int_y])
return liste
def enlever_doublon(liste):
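    # Remove *consecutive* duplicates only; the points come from a continuous
    # sweep of angles, so identical pixels appear in runs.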
n = len(liste)
new = []
if n == 0:
return []
if n == 1:
new.append(liste[0])
return new
else:
for i in range(0, n - 1):
if liste[i] != liste[i + 1]:
new.append(liste[i])
new.append(liste[n - 1])
return new
def ellipse_final(liste):
liste_0 = tracer_ellipse(liste)
liste_1 = enlever_doublon(liste_0)
return liste_1
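# Illustrative usage (assumed coefficient convention, inferred from
# changement_de_coordonnées_ellipse: liste = [a, b, c, d, e, f] for the conic
# a*x**2 + 2*b*x*y + c*y**2 + 2*d*x + 2*e*y + f = 0):
def exemple_cercle():
    # circle of radius 100 centred at (128, 128), fully inside the frame
    return ellipse_final([1.0, 0.0, 1.0, -128.0, -128.0, 2 * 128.0 ** 2 - 100.0 ** 2])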
| [
"[email protected]"
] | |
654e36ee1139fd4e26f34bff8a5ad866723502e8 | 9e2bd8e828d3aeedc9b5034d847a8e1e3a381cfa | /rltk/io/adapter/__init__.py | 75fd0cceb723c9a2948f88da45ca6d33433231d1 | [
"MIT"
] | permissive | rpatil524/rltk | 0a55c6d5f02ccf2991dc458fb38a0bf4f0caa151 | aee10ed5dd561583e60db3373ed82fe1208da1e9 | refs/heads/master | 2021-11-01T10:41:12.158504 | 2021-10-06T23:41:52 | 2021-10-06T23:41:52 | 183,799,148 | 0 | 0 | MIT | 2021-09-04T03:42:53 | 2019-04-27T16:53:12 | Python | UTF-8 | Python | false | false | 624 | py | from rltk.io.adapter.key_value_adapter import KeyValueAdapter
from rltk.io.adapter.memory_key_value_adapter import MemoryKeyValueAdapter
from rltk.io.adapter.dbm_key_value_adapter import DbmKeyValueAdapter
from rltk.io.adapter.redis_key_value_adapter import RedisKeyValueAdapter
from rltk.io.adapter.hbase_key_value_adapter import HBaseKeyValueAdapter
from rltk.io.adapter.key_set_adapter import KeySetAdapter
from rltk.io.adapter.memory_key_set_adapter import MemoryKeySetAdapter
from rltk.io.adapter.redis_key_set_adapter import RedisKeySetAdapter
from rltk.io.adapter.leveldb_key_set_adapter import LevelDbKeySetAdapter
| [
"[email protected]"
] | |
0e961003dcb191f892a1ebafa66c42a9f3c130d3 | 78011517bc7fe931f736b81297d0603f7dc01819 | /Python/kettle_set_mode.py | bd205cdc3f9d36d16e56196725915e22b9f34f4c | [] | no_license | BorisE/RedmondKettle | 47c040f90be4ccf7cee76720d793b4ab908ccfc3 | dec016e65a8cd9663719c279ef6bb98fda60f923 | refs/heads/master | 2021-05-17T18:17:43.429333 | 2020-04-05T18:08:14 | 2020-04-05T18:08:14 | 250,914,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | #!/usr/bin/python
# coding: utf-8
#from kettle.kettleclass import RedmondKettler
#from kettle.logclass import logclass
from kettle.logclass import log
import sys
#Use main wrapper library
from kettle_wrappers_lib import *
#Private part
if __name__ == "__main__":
log.debug(f"Arguments count: {len(sys.argv)}")
for i, arg in enumerate(sys.argv):
log.debug(f"Argument {i:>6}: {arg}")
# kettle_mode_heat mode target_temp duration_correction
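    # Example invocation (illustrative): set mode 01 with an 85 C target and no
    # duration correction:
    #   python kettle_set_mode.py 01 85 0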
try:
mode = str(sys.argv[1])
        mode = mode if len(mode) == 2 else "0" + mode
except:
mode = "00"
try:
target_temp = int(sys.argv[2])
if mode == "01" or mode == "02":
            target_temp = min(target_temp, 90)  # max allowed is 90 in modes 01 & 02
except:
target_temp = 100
try:
        duration_correction = int(sys.argv[3])
except:
        duration_correction = 0
#Init Kettler Object
kettler = Setup_Kettler()
if kettler:
log.info("Kettle setup was successfully completed, can proceed with commands further")
#kettler.sendStart()
log.info ("Setting kettle parameters: MODE=%s, TARGET_TEMP=%s, DURATION_CORRECTION=%s"%(mode,target_temp,dutation_correction))
mainMethodAnswer = False
        if kettler.sendSetMode(mode, target_temp, duration_correction):
log.info ("Successfully set")
mainMethodAnswer = True
else:
log.error ("Error setting kettle parameters")
mainMethodAnswer = False
json_data = Make_status_JSON (kettler, mainMethodAnswer)
print (json_data)
kettler.disconnect()
| [
"[email protected]"
] | |
9ca6db7957cc31ae72b2b5347b7994b5447e1f08 | 1f63dde39fcc5f8be29f2acb947c41f1b6f1683e | /Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/kernel_tests/rnn_test.py | 09160c41e472bbfb860179d0c5268646f827273e | [
"Apache-2.0",
"MIT"
] | permissive | koobonil/Boss2D | 09ca948823e0df5a5a53b64a10033c4f3665483a | e5eb355b57228a701495f2660f137bd05628c202 | refs/heads/master | 2022-10-20T09:02:51.341143 | 2019-07-18T02:13:44 | 2019-07-18T02:13:44 | 105,999,368 | 7 | 2 | MIT | 2022-10-04T23:31:12 | 2017-10-06T11:57:07 | C++ | UTF-8 | Python | false | false | 22,334 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import timeit
import numpy as np
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Plus1RNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def __call__(self, input_, state, scope=None):
return (input_, state + 1)
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.dynamic_rnn(
cell,
array_ops.stack(inputs),
dtype=dtypes.float32,
sequence_length=[[4]])
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
def _create_dynamic_rnn():
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _timer(sess, ops):
# Warm in
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Halve the sequence length, full static unroll
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + final_state,
trainable_variables)
return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Run with concatenated states (default)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell,
inputs_t,
sequence_length=sequence_length,
swap_memory=swap_memory,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
swap_memory, nn):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(nn):
if dynamic:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
dynamic, elapsed,
elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(
name="graph_creation_time_static_T%02d" % max_time,
iters=5,
wall_time=s_dt)
self.report_benchmark(
name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5,
wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
for batch_size in (
16,
128,):
for max_time in (50,):
for num_units in (
16,
128,):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=t_dt)
def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
"""The memory swapping test for the SOSP submission."""
print("Calculation: Long LSTM Sequence")
print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
batch_size = 512
seqlen = 800
num_units = 512
dynamic = True
swap_memory = True
# Some warming up.
if swap_memory:
rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory, 2)
# Measure the performance.
for slen in xrange(100, 1100, 100):
rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
swap_memory, 3)
if __name__ == "__main__":
test.main()
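# ---------------------------------------------------------------------------
# Illustrative invocation (added; not part of the original file). Assuming the
# usual tf.test.Benchmark conventions, benchmark methods are selected with the
# --benchmarks regex flag rather than run as tests; the file name below is a
# placeholder:
#
#   python rnn_test.py --benchmarks=BenchmarkRNN.benchmarkStaticUnrollVsDynamicFlowLSTM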
| [
"[email protected]"
] | |
5d8c2d955c93665a6a101d866aabccc05a6eec22 | ed9e1b622dad6b559cd0fe6fa23d6a27f857dc7f | /galsim/config/input_powerspectrum.py | c80cacd70793e9d12c16b6aa092dc249e98deb81 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | ajwheeler/GalSim | 40d6f8c64789b601ed2547eefed05f1577592613 | cf0ef33e5f83da1b13a0617d362d8357056d6f22 | refs/heads/master | 2021-01-22T06:14:31.486159 | 2017-04-20T01:20:20 | 2017-04-20T01:20:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,937 | py | # Copyright (c) 2012-2017 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
from __future__ import print_function
import galsim
import math
# This file adds input type nfw_halo and value types PowerSpectrumShear and
# PowerSpectrumMagnification.
# A PowerSpectrum input type requires a special initialization at the start of each image
# to build the shear grid. This is done in SetupPowerSpecrum. There are also a couple of
# parameters that are specific to that step, which we want to ignore when getting the
# initialization kwargs, so we define a special GetPowerSpectrumKwargs function here.
from .input import InputLoader
class PowerSpectrumLoader(InputLoader):
def getKwargs(self, config, base, logger):
"""Parse the config dict and return the kwargs needed to build the PowerSpectrum object.
@param config The configuration dict for 'power_spectrum'
@param base The base configuration dict
@param logger If given, a logger object to log progress.
@returns kwargs, safe
"""
# Ignore these parameters here, since they are for the buildGrid step, not the
# initialization of the PowerSpectrum object.
ignore = ['grid_spacing', 'interpolant']
opt = galsim.PowerSpectrum._opt_params
return galsim.config.GetAllParams(config, base, opt=opt, ignore=ignore)
def setupImage(self, input_obj, config, base, logger=None):
"""Set up the PowerSpectrum input object's gridded values based on the
size of the image and the grid spacing.
@param input_obj The PowerSpectrum object to use
@param config The configuration dict for 'power_spectrum'
@param base The base configuration dict.
@param logger If given, a logger object to log progress.
"""
if 'grid_spacing' in config:
grid_spacing = galsim.config.ParseValue(config, 'grid_spacing', base, float)[0]
elif 'grid_xsize' in base and 'grid_ysize' in base:
# Then we have a tiled image. Can use the tile spacing as the grid spacing.
grid_size = min(base['grid_xsize'], base['grid_ysize'])
# This size is in pixels, so we need to convert to arcsec using the pixel scale.
# Note: we use the (max) pixel scale at the image center. This isn't
# necessarily optimal, but it seems like the best choice for a non-trivial WCS.
scale = base['wcs'].maxLinearScale(base['image_center'])
grid_spacing = grid_size * scale
else:
raise AttributeError("power_spectrum.grid_spacing required for non-tiled images")
if 'grid_xsize' in base and base['grid_xsize'] == base['grid_ysize']:
# PowerSpectrum can only do a square FFT, so make it the larger of the two n's.
nx_grid = int(math.ceil(base['image_xsize']/base['grid_xsize']))
ny_grid = int(math.ceil(base['image_ysize']/base['grid_ysize']))
ngrid = max(nx_grid, ny_grid)
# Normally that's good, but if tiles aren't square, need to drop through to the
# second option.
else:
image_size = max(base['image_xsize'], base['image_ysize'])
scale = base['wcs'].maxLinearScale(base['image_center'])
ngrid = int(math.ceil(image_size * scale / grid_spacing))
if 'interpolant' in config:
interpolant = galsim.config.ParseValue(config, 'interpolant', base, str)[0]
else:
interpolant = None
# We don't care about the output here. This just builds the grid, which we'll
# access for each object using its position.
if base['wcs'].isCelestial():
world_center = galsim.PositionD(0,0)
else:
world_center = base['wcs'].toWorld(base['image_center'])
rng = galsim.config.check_for_rng(base, logger, 'PowerSpectrum')
input_obj.buildGrid(grid_spacing=grid_spacing, ngrid=ngrid, center=world_center,
rng=rng, interpolant=interpolant)
# Make sure this process gives consistent results regardless of the number of processes
# being used.
if not isinstance(input_obj, galsim.PowerSpectrum) and rng is not None:
# Then input_obj is really a proxy, which means the rng was pickled, so we need to
# discard the same number of random calls from the one in the config dict.
rng.discard(input_obj.nRandCallsForBuildGrid())
# Register this as a valid input type
from .input import RegisterInputType
RegisterInputType('power_spectrum', PowerSpectrumLoader(galsim.PowerSpectrum))
# There are two value types associated with this: PowerSpectrumShear and
# PowerSpectrumMagnification.
def _GenerateFromPowerSpectrumShear(config, base, value_type):
"""@brief Return a shear calculated from a PowerSpectrum object.
"""
power_spectrum = galsim.config.GetInputObj('power_spectrum', config, base, 'PowerSpectrumShear')
if 'world_pos' not in base:
raise ValueError("PowerSpectrumShear requested, but no position defined.")
pos = base['world_pos']
# There aren't any parameters for this, so just make sure num is the only (optional)
# one present.
galsim.config.CheckAllParams(config, opt={ 'num' : int })
try:
g1,g2 = power_spectrum.getShear(pos)
shear = galsim.Shear(g1=g1,g2=g2)
except KeyboardInterrupt:
raise
except Exception as e:
import warnings
warnings.warn("Warning: PowerSpectrum shear is invalid -- probably strong lensing! " +
"Using shear = 0.")
shear = galsim.Shear(g1=0,g2=0)
#print(base['obj_num'],'PS shear = ',shear)
return shear, False
def _GenerateFromPowerSpectrumMagnification(config, base, value_type):
"""@brief Return a magnification calculated from a PowerSpectrum object.
"""
power_spectrum = galsim.config.GetInputObj('power_spectrum', config, base,
'PowerSpectrumMagnification')
if 'world_pos' not in base:
raise ValueError("PowerSpectrumMagnification requested, but no position defined.")
pos = base['world_pos']
opt = { 'max_mu' : float, 'num' : int }
kwargs = galsim.config.GetAllParams(config, base, opt=opt)[0]
mu = power_spectrum.getMagnification(pos)
max_mu = kwargs.get('max_mu', 25.)
if not max_mu > 0.:
raise ValueError(
"Invalid max_mu=%f (must be > 0) for type = PowerSpectrumMagnification"%max_mu)
if mu < 0 or mu > max_mu:
import warnings
warnings.warn("Warning: PowerSpectrum mu = %f means strong lensing! Using mu=%f"%(
mu,max_mu))
mu = max_mu
#print(base['obj_num'],'PS mu = ',mu)
return mu, False
# Register these as valid value types
from .value import RegisterValueType
RegisterValueType('PowerSpectrumShear', _GenerateFromPowerSpectrumShear, [ galsim.Shear ],
input_type='power_spectrum')
RegisterValueType('PowerSpectrumMagnification', _GenerateFromPowerSpectrumMagnification, [ float ],
input_type='power_spectrum')
| [
"[email protected]"
] | |
d1a8a52cfbf35438a187599eb96006576f455b17 | 1f2b05dbe818ff922269717389187e5ced71d198 | /blog/feeds.py | 1abe7ca5675208ef1cc195782b747a4c6430f792 | [
"BSD-2-Clause"
] | permissive | Pythonian/suorganizer | e665b0c642b62172156bbbd6537485d66709c339 | c835cf1647b2b980d3eaf744c9dd91f33dec7e33 | refs/heads/master | 2022-04-24T10:19:09.722026 | 2020-04-19T09:07:37 | 2020-04-19T09:07:37 | 256,956,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from django.contrib.syndication.views import Feed
from django.urls import reverse_lazy
from django.utils.feedgenerator import (
Atom1Feed, Rss201rev2Feed)
from .models import Post
class BasePostFeedMixin():
title = "Latest Startup Organizer Blog Posts"
link = reverse_lazy('blog_post_list')
description = subtitle = (
"Stay up to date on the "
"hottest startup news.")
def items(self):
# uses Post.Meta.ordering
return Post.objects.published()[:10]
def item_title(self, item):
return item.formatted_title()
def item_description(self, item):
return item.short_text()
def item_link(self, item):
return item.get_absolute_url()
class AtomPostFeed(BasePostFeedMixin, Feed):
feed_type = Atom1Feed
class Rss2PostFeed(BasePostFeedMixin, Feed):
feed_type = Rss201rev2Feed
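# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original file): one way these
# feed classes might be wired into a URLconf. The URL paths and names below
# are assumptions.
#
# from django.urls import path
# from .feeds import AtomPostFeed, Rss2PostFeed
#
# urlpatterns = [
#     path('blog/feed/atom/', AtomPostFeed(), name='blog_feed_atom'),
#     path('blog/feed/rss/', Rss2PostFeed(), name='blog_feed_rss'),
# ]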
| [
"[email protected]"
] | |
9c91e73a5440b8c28e4620927f5a5026b41dba99 | d49fbd7874b70a93cbc551afed1b87e3e47617a8 | /django/example/functions/auth.py | 77a3a3bacddb35d18816e9b9d8c1217f663a1b2b | [] | no_license | gitter-badger/tutorials-4 | bbdbb673e978118f9fec3212baa13f6f99226be0 | 3ce1cdb7c6d26f6df4d6bb94e82f83e8cab9389b | refs/heads/master | 2020-04-04T20:52:28.181616 | 2018-10-28T22:05:17 | 2018-10-28T22:05:17 | 156,264,177 | 0 | 0 | null | 2018-11-05T18:32:17 | 2018-11-05T18:32:16 | null | UTF-8 | Python | false | false | 470 | py | from attr import attrib, attrs
from django.contrib.auth import login, password_validation
from django.core.exceptions import ValidationError
def validate_password(raw_password):
try:
password_validation.validate_password(raw_password)
except ValidationError as error:
return False, error
else:
return True, None
@attrs
class StoreUserInSession:
request = attrib()
def do(self, user):
login(self.request, user)
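# ---------------------------------------------------------------------------
# Illustrative usage (added; not part of the original file): combining the two
# helpers above in a registration flow. The view signature and the
# set_password/save calls are assumptions about the surrounding code.
#
# def register(request, user, raw_password):
#     ok, error = validate_password(raw_password)
#     if not ok:
#         raise error
#     user.set_password(raw_password)
#     user.save()
#     StoreUserInSession(request).do(user)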
| [
"[email protected]"
] | |
252dcf468e2f7a8486144abbbbd8991296a8ff2c | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/ETFMM_K/YW_ETFMM_SZSJ_403_K.py | 3b875095e4f9841ff364c4ae9f79cb0c40b3692f | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFMM_SZSJ_403_K(xtp_test_case):
# YW_ETFMM_SZSJ_403_K
def test_YW_ETFMM_SZSJ_403_K(self):
        title = 'Shenzhen A-share trading day: immediate-or-cancel sell order - wrong business type'
        # Define the expected values for the current test case
        # Expected states: initial, unfilled, partially filled, fully filled, partial-cancel reported, partially cancelled, reported pending cancel, cancelled, rejected (invalid order), cancel rejected, internally cancelled
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '废单',
'errorID': 11000370,
'errorMSG': queryOrderErrorMsg(11000370),
'是否生成报单': '否',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Params: ticker code, market, security type, security status, trading status, side (B=buy, S=sell), expected state, Api
stkparm = QueryStkPriceQty('999999', '2', '14', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
                '测试错误原因': 'Failed to fetch order parameters, ' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_IPOS'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
'price': stkparm['随机中间价'],
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('Execution result: ' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
abaf7f138942cdf3f08632884ead902bc339f791 | 9cc325b00adba5f1b1d8334d98fcca5e0b995d3e | /setup.py | 557da98bf3b4a4d3cc652154b1d3263ee63fdf58 | [] | no_license | voronind/fuzzy-fabric | 63fb87d92c224c2c27f8fc3da00bcd799eac03e4 | 9afa7426c9ea91be14a706ecbc887432b447615d | refs/heads/master | 2021-05-28T00:43:00.968044 | 2014-08-25T14:35:59 | 2014-08-25T14:35:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | # coding=utf8
from setuptools import setup, find_packages
setup(
name='fuzzy-fabric',
version='0.6.3',
author='Dmitry Voronin',
author_email='[email protected]',
url='https://github.com/dimka665/fuzzy-fabric',
description='Fuzzy Functions For Fabric',
packages=find_packages(),
package_data={
'': [
'templates/.*',
'templates/*.*',
'templates/nginx/*.*',
]
},
install_requires=[
'Fabric',
'virtualenv',
'virtualenvwrapper',
],
entry_points={
'console_scripts': [
'ff = fuzzy_fabric.main:main',
]
},
license='MIT License',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='fuzzy functions for fabric',
)
| [
"[email protected]"
] | |
6b5cb6b20a8f5f1ca794c0c3f95dd7536d9baba6 | d109b64bfa8c80a6ec7d647beeadf9fe1c667fac | /class0925/clist.py | 5480aaf0d2d17a6e7ea731723916f1a863406239 | [] | no_license | jumbokh/micropython_class | d34dd0a2be39d421d3bbf31dbb7bfd39b5f6ac6f | 950be81582dba970e9c982e2e06fa21d9e9a0fdd | refs/heads/master | 2022-10-10T22:27:02.759185 | 2022-10-01T14:44:31 | 2022-10-01T14:44:31 | 173,898,623 | 4 | 3 | null | 2020-03-31T09:57:23 | 2019-03-05T07:40:38 | Jupyter Notebook | UTF-8 | Python | false | false | 349 | py | clist=[[0,0,0],[255,255,255],[255,0,0],[0,255,0],
[0,0,255],[255,255,0],[0,255,255],[255,0,255],
[192,192,192,],[128,128,128],[128,0,0],[128,0,0],
[128,128,0],[0,128,128],[0,0,128]]
hlist=[[59,96,233],[104,42,67],[213,227,227],[216,18,47],
[12,238,108],[255,246,58],[236,184,20],[240,93,197],
[16,173,186],[0,222,255],[59,96,233],[102,98,133]]
| [
"[email protected]"
] | |
4a0710140ef441c276e39701404ea8f661acf36a | 249298bde8b03da659171947b29b8761b7115201 | /pollux/adaptdl/adaptdl/checkpoint.py | ed158092630eecbb9441a42570d4b78a92aa3f4f | [
"Apache-2.0"
] | permissive | gudiandian/ElasticFlow | cd4ce1f97f17cb878aa79865277ab64fa8ba7f89 | 0ffc17d257f2923de6478c4331ea64d858e7ab53 | refs/heads/main | 2023-04-18T14:45:49.998762 | 2023-01-05T13:13:10 | 2023-01-05T13:13:10 | 541,545,148 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,657 | py | # Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides functionality to Save and load arbitrary state as part of
checkpoint-restart elasticity. The `State` class can be subclassed to define
how to save/load any state to/from persistent storage, so it can be restored
after the current job restarts and resumed from where it left off.
"""
import os
from adaptdl.env import checkpoint_path, replica_rank
# FIXME: Keeping global state like this will result in memory leaks for
# applications which do not restart too often.
_STATES_TO_NAMES = {}
_NAMES_TO_STATES = {}
class State(object):
"""
This class implements An arbitrary piece of state which can be saved and
loaded as part of a checkpoint, and synchronized across all replicas.
Should be sub-classed to define custom save, load, and sync logic.
"""
def __init__(self, name):
"""
Initialize the state object with a unique identifier `name`, which is
used to refer to the saved object in persistent storage. No two `State`
objects may share the same `name`.
Arguments:
name (str): Unique name of this `State` object.
Raises:
ValueError: If a `State` object with the given name already exists.
"""
if name in _NAMES_TO_STATES:
raise ValueError("State '{}' already exists".format(name))
_NAMES_TO_STATES[name] = self
_STATES_TO_NAMES[self] = name
def save(self, fileobj):
"""
This method should be overridden by subclasses to define how the state
is saved. Is invoked by `save_all_states` and `save_state` to save the
state into persistent storage.
Arguments:
fileobj (BinaryIO): A binary writable file object.
"""
pass
def load(self, fileobj):
"""
This method should be overridden by subclasses to define how the state
is loaded. Is invoked by `load_state` to load the state from persistent
storage.
Arguments:
fileobj (BinaryIO): A binary readable file object.
"""
pass
def sync(self):
"""
This method should be overridden by subclasses to define how the state
is synchronized across replicas. This might be necessary to make sure
the state is consistent before saving it to persistent storage. Is
invoked by `save_state` before saving the state.
"""
pass
def save_all_states():
"""
    Invokes `save_state` on all registered `State` objects.
This function can be used to trigger a global checkpoint and save every
`State` in the current job.
"""
for state in _STATES_TO_NAMES:
save_state(state)
def save_state(state, sync=True):
"""
Saves a `State` object to persistent storage. First invokes `State.sync` on
all replicas if `sync` is `True` (default), and then invokes `State.save`
on the replica of rank 0 only.
Arguments:
state (State): The `State` object to save to persistent storage.
sync (bool): Whether `State.sync` should be invoked.
"""
if sync:
state.sync()
if replica_rank() == 0:
name = _STATES_TO_NAMES[state]
if checkpoint_path() is not None:
with open(os.path.join(checkpoint_path(), name), "wb") as f:
state.save(f)
def load_state(state):
"""
Load the given `State` object from persistent storage. If the object was
previously saved, then State.load will be invoked with a readable file
object to load from.
Arguments:
state (State): `State` object to load from persistent storage.
Returns:
`True` if state was previously saved and `State.load` was invoked,
`False` otherwise.
"""
if checkpoint_path() is None:
return False
try:
name = _STATES_TO_NAMES[state]
with open(os.path.join(checkpoint_path(), name), "rb") as f:
state.load(f)
return True
except FileNotFoundError:
return False
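# ---------------------------------------------------------------------------
# Illustrative usage (added; not part of the original module): a minimal State
# subclass that checkpoints a dict with pickle. The class name DictState and
# the attribute `value` are hypothetical; save, load, and sync are the hooks
# defined above.
#
# import pickle
#
# class DictState(State):
#     def __init__(self, name):
#         super().__init__(name)
#         self.value = {}
#
#     def save(self, fileobj):
#         pickle.dump(self.value, fileobj)
#
#     def load(self, fileobj):
#         self.value = pickle.load(fileobj)
#
# progress = DictState("progress")
# if not load_state(progress):      # first run: nothing checkpointed yet
#     progress.value = {"epoch": 0}
# ...
# save_all_states()                 # checkpoints every registered State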
| [
"[email protected]"
] | |
a5ee1872c6c373df25f50d062c448a2600297ef8 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Outside_Air_Flow_Sensor.py | 68914369d58d176ddefe3473259f05153cfd0d36 | [] | no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Air_Flow_Sensor import Air_Flow_Sensor
from brick.brickschema.org.schema._1_0_2.Brick.Outside_Air import Outside_Air
class Outside_Air_Flow_Sensor(Air_Flow_Sensor,Outside_Air):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Outside_Air_Flow_Sensor
| [
"[email protected]"
] | |
01c272af2d82ab2ed9b1ab5b5f39606aed3d5c01 | 42c63d5f9c724c99ba93f77bdead51891fcf8623 | /OpenStack-Mitaka-src/python-manilaclient/manilaclient/v2/share_export_locations.py | 0d0cb3163ea350a24e7e37fcc6849e45d5fa6087 | [
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | liyongle/openstack-mitaka | 115ae819d42ed9bf0922a8c0ab584fa99a3daf92 | 5ccd31c6c3b9aa68b9db1bdafcf1b029e8e37b33 | refs/heads/master | 2021-07-13T04:57:53.488114 | 2019-03-07T13:26:25 | 2019-03-07T13:26:25 | 174,311,782 | 0 | 1 | null | 2020-07-24T01:44:47 | 2019-03-07T09:18:55 | Python | UTF-8 | Python | false | false | 1,883 | py | # Copyright 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manilaclient import api_versions
from manilaclient import base
from manilaclient.openstack.common.apiclient import base as common_base
class ShareExportLocation(common_base.Resource):
"""Resource class for a share export location."""
def __repr__(self):
return "<ShareExportLocation: %s>" % self.id
def __getitem__(self, key):
return self._info[key]
class ShareExportLocationManager(base.ManagerWithFind):
"""Manage :class:`ShareExportLocation` resources."""
resource_class = ShareExportLocation
@api_versions.wraps("2.9")
def list(self, share):
"""List all share export locations."""
share_id = common_base.getid(share)
return self._list("/shares/%s/export_locations" % share_id,
"export_locations")
@api_versions.wraps("2.9")
def get(self, share, export_location):
"""Get a share export location."""
share_id = common_base.getid(share)
export_location_id = common_base.getid(export_location)
return self._get(
"/shares/%(share_id)s/export_locations/%(export_location_id)s" % {
"share_id": share_id,
"export_location_id": export_location_id}, "export_location")
| [
"[email protected]"
] | |
85de194799b259a616254f8b20da8a630ac9d0a6 | e1eaed6dde62fc54eb317d28dbd18e0740e3e8f3 | /official/vision/image_classification/efficientnet/tfhub_export.py | d3518a1304c8c761cfaabdcc96dead70dd9b0097 | [
"Apache-2.0"
] | permissive | nlpming/models | cf5008d2e66d2b66b6d61423e214f2f9f9fbe472 | 3cbf0748529d787dd09fa3ed031e557f0ddfa268 | refs/heads/master | 2021-12-03T03:29:16.042489 | 2021-11-23T14:09:10 | 2021-11-23T14:09:10 | 206,007,973 | 0 | 0 | Apache-2.0 | 2019-09-03T06:47:46 | 2019-09-03T06:47:46 | null | UTF-8 | Python | false | false | 2,317 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export TF-Hub SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from official.vision.image_classification.efficientnet import efficientnet_model
FLAGS = flags.FLAGS
flags.DEFINE_string("model_name", None, "EfficientNet model name.")
flags.DEFINE_string("model_path", None, "File path to TF model checkpoint.")
flags.DEFINE_string("export_path", None,
"TF-Hub SavedModel destination path to export.")
def export_tfhub(model_path, hub_destination, model_name):
"""Restores a tf.keras.Model and saves for TF-Hub."""
model_configs = dict(efficientnet_model.MODEL_CONFIGS)
config = model_configs[model_name]
image_input = tf.keras.layers.Input(
shape=(None, None, 3), name="image_input", dtype=tf.float32)
x = image_input * 255.0
  outputs = efficientnet_model.efficientnet(x, config)
  hub_model = tf.keras.Model(image_input, outputs)
ckpt = tf.train.Checkpoint(model=hub_model)
ckpt.restore(model_path).assert_existing_objects_matched()
hub_model.save(
os.path.join(hub_destination, "classification"), include_optimizer=False)
feature_vector_output = hub_model.get_layer(name="top_pool").get_output_at(0)
hub_model2 = tf.keras.Model(image_input, feature_vector_output)
hub_model2.save(
os.path.join(hub_destination, "feature-vector"), include_optimizer=False)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
export_tfhub(FLAGS.model_path, FLAGS.export_path, FLAGS.model_name)
if __name__ == "__main__":
app.run(main)
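# ---------------------------------------------------------------------------
# Illustrative invocation (added; not part of the original script). The model
# name must be a key of efficientnet_model.MODEL_CONFIGS (e.g.
# "efficientnet-b0"); the paths below are placeholders:
#
#   python tfhub_export.py \
#       --model_name=efficientnet-b0 \
#       --model_path=/tmp/efficientnet-b0/ckpt \
#       --export_path=/tmp/efficientnet-b0/hub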
| [
"[email protected]"
] | |
31214babd69af863c6c00e938f64103d02fbd00b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_143/ch20_2020_03_04_19_28_32_953906.py | 1c8d2050fbb9fd530886f4bcd029d1f07091e621 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | # Pergunta quantos km
km= float(input('quantos km:'))
def P(qts_km):
    if qts_km <= 200:
        return qts_km * 0.5
    else:
        return 200 * 0.5 + (qts_km - 200) * 0.45
y = P(km)
print('{0:.2f}'.format(y))
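# Worked example (added; not in the original exercise):
# P(300) = 200*0.5 + (300-200)*0.45 = 100.0 + 45.0 = 145.0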
"[email protected]"
] | |
7a6cdd72513023707903c252305e7238ce9bbccf | f9f1f887629855bbf12ecb0b7358fed5946b3caa | /.history/app_blog_forum/views_20201117201247.py | b670732826fc49cafc6e9dd7d657644d12d97833 | [] | no_license | hibamohi5/blog_forum | 4f687cee3ca6bdb1d0302b3657a77c01945404b3 | d6380eb7149355c79276b738da7da94c2ee03570 | refs/heads/main | 2023-01-14T18:33:53.043754 | 2020-11-20T01:52:22 | 2020-11-20T01:52:22 | 314,417,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
return render(request, "index.html")
def register_new_user(request):
errors = User.objects.user_registration_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
error_msg = key + ' - ' + value
messages.error(request, error_msg)
return redirect("/")
else:
first_name_from_post = request.POST['first_name']
last_name_from_post = request.POST['last_name']
email_from_post = request.POST['email']
password_from_post = request.POST['password']
new_user = User.objects.create(
first_name=first_name_from_post,
last_name=last_name_from_post,
email=email_from_post,
password=password_from_post
)
print(new_user.id)
request.session['user_id'] = new_user.id
return redirect('/register/view')
def view_trip(request, trip_id):
if 'user_id' not in request.session:
return redirect("/")
user = User.objects.get(id=request.session['user_id'])
trip = Trip.objects.get(id=trip_id)
context = {
"user": user,
"trip": trip
}
return render(request, "view_trip.html", context)
| [
"[email protected]"
] | |
ac08592f24581115477dc1e3fe0e6907fc2e9860 | 2941b312fc83ff08f5b5f362cf700e7ff8352cd3 | /kats/tests/models/test_data_validation.py | cf7402fdd68cec674de17cc8bb5223d1b0844e51 | [
"MIT"
] | permissive | rbagd/Kats | 5e1ac0b288f1250423921f7ada812c1198c55269 | 4f86a332d0afc790ab1d833fd8ffe6782a8be93b | refs/heads/main | 2023-08-21T20:35:42.141281 | 2021-09-22T09:27:18 | 2021-09-22T09:27:18 | 407,527,411 | 0 | 0 | MIT | 2021-09-17T12:11:43 | 2021-09-17T12:11:42 | null | UTF-8 | Python | false | false | 1,870 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
import unittest
from unittest import TestCase
import pandas as pd
from kats.consts import TimeSeriesData
from kats.data.utils import load_air_passengers
def load_data(file_name):
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
class DataValidationTest(TestCase):
def setUp(self):
self.TSData = load_air_passengers()
def test_data_validation(self) -> None:
# add the extra data point to break the frequency.
extra_point = pd.DataFrame(
[["1900-01-01", 2], ["2020-01-01", 2]], columns=["time", "y"]
)
DATA = self.TSData.to_dataframe()
data_with_extra_point = DATA.copy().append(extra_point)
tsData_with_missing_point = TimeSeriesData(data_with_extra_point)
tsData_with_missing_point.validate_data(
validate_frequency=False, validate_dimension=False
)
tsData_with_missing_point.validate_data(
validate_frequency=False, validate_dimension=True
)
with self.assertRaises(ValueError, msg="Frequency validation should fail."):
tsData_with_missing_point.validate_data(
validate_frequency=True, validate_dimension=False
)
with self.assertRaises(ValueError, msg="Frequency validation should fail."):
tsData_with_missing_point.validate_data(
validate_frequency=True, validate_dimension=True
)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
c466642b454d777e22b14c619db8d707e72673fd | 615e3cdc2c136b2f66b5c553d375823d3580fd08 | /exercicio/venv/Scripts/easy_install-3.7-script.py | d7f8ad6ddd000dbb1b0c6fc876af7b1dc51e5a7a | [] | no_license | Android-Ale/PracticePython | 859a084e224cfb52eed573e38d7d9dc91f405885 | cab2ac7593deb22e6bb05a95ecd19a8ea2c96b0a | refs/heads/master | 2023-05-06T06:33:36.724569 | 2021-05-15T00:12:06 | 2021-05-15T00:12:06 | 369,307,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | #!C:\Users\Alpha\PycharmProjects\exercicio\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] |